/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "internals.h"
#include "disas/disas.h"
#include "qemu/bitops.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
TCGv_i64 cpu_exclusive_test;
TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    default:
        g_assert_not_reached();
    }
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(sq);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(sq);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        break;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_ld_i32(val, addr, index, (OPC));                        \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_st_i32(val, addr, index, (OPC));                        \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_TEUW | MO_ALIGN)
DO_GEN_LD(32ua, MO_TEUL | MO_ALIGN)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
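/* For reference, with TARGET_LONG_BITS == 32 the DO_GEN_LD(32u, MO_TEUL)
 * instantiation above expands to roughly:
 *
 *     static inline void gen_aa32_ld32u(TCGv_i32 val, TCGv_i32 addr, int index)
 *     {
 *         tcg_gen_qemu_ld_i32(val, addr, index, MO_TEUL);
 *     }
 */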
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}

static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
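/* In the encoding decoded above, bit 25 selects a (shifted) register offset
 * rather than a 12-bit immediate, bit 23 selects add versus subtract,
 * bits [11:7] and [6:5] give the shift amount and shift type, and
 * bits [3:0] name the offset register rm.
 */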
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name)                                  \
static inline void gen_vfp_##name(int dp, int neon)         \
{                                                           \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);            \
    if (dp) {                                               \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else {                                                \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    }                                                       \
    tcg_temp_free_ptr(statusptr);                           \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                  \
static inline void gen_vfp_##name(int dp, int neon)         \
{                                                           \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);            \
    if (dp) {                                               \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else {                                                \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    }                                                       \
    tcg_temp_free_ptr(statusptr);                           \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                            \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{                                                           \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);              \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);            \
    if (dp) {                                               \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr);         \
    } else {                                                \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr);         \
    }                                                       \
    tcg_temp_free_i32(tmp_shift);                           \
    tcg_temp_free_ptr(statusptr);                           \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
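/* Example: neon_reg_offset(2, 1) is vfp_reg_offset(0, 5), i.e. the upper
 * 32-bit half of the 64-bit register d2.
 */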
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
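/* For reference, IWMMXT_OP_ENV(avgb0) below expands to a wrapper of the form:
 *
 *     static inline void gen_op_iwmmxt_avgb0_M0_wRn(int rn)
 *     {
 *         iwmmxt_load_reg(cpu_V1, rn);
 *         gen_helper_iwmmxt_avgb0(cpu_M0, cpu_env, cpu_M0, cpu_V1);
 *     }
 */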
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
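/* Note on gen_iwmmxt_shift() above: bit 8 of the instruction selects a shift
 * amount held in one of the wCGR0..wCGR3 control registers, while a clear
 * bit 8 takes the amount from the low half of data register wRd; in either
 * case the value is masked to the width the caller passes in.
 */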
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) { /* WLDRH */
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    } else { /* WLDRB */
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                    } else { /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
& 0x0f000000) != 0x0e000000)
1693 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1694 case 0x000: /* WOR */
1695 wrd
= (insn
>> 12) & 0xf;
1696 rd0
= (insn
>> 0) & 0xf;
1697 rd1
= (insn
>> 16) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1699 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1700 gen_op_iwmmxt_setpsr_nz();
1701 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1702 gen_op_iwmmxt_set_mup();
1703 gen_op_iwmmxt_set_cup();
1705 case 0x011: /* TMCR */
1708 rd
= (insn
>> 12) & 0xf;
1709 wrd
= (insn
>> 16) & 0xf;
1711 case ARM_IWMMXT_wCID
:
1712 case ARM_IWMMXT_wCASF
:
1714 case ARM_IWMMXT_wCon
:
1715 gen_op_iwmmxt_set_cup();
1717 case ARM_IWMMXT_wCSSF
:
1718 tmp
= iwmmxt_load_creg(wrd
);
1719 tmp2
= load_reg(s
, rd
);
1720 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1721 tcg_temp_free_i32(tmp2
);
1722 iwmmxt_store_creg(wrd
, tmp
);
1724 case ARM_IWMMXT_wCGR0
:
1725 case ARM_IWMMXT_wCGR1
:
1726 case ARM_IWMMXT_wCGR2
:
1727 case ARM_IWMMXT_wCGR3
:
1728 gen_op_iwmmxt_set_cup();
1729 tmp
= load_reg(s
, rd
);
1730 iwmmxt_store_creg(wrd
, tmp
);
1736 case 0x100: /* WXOR */
1737 wrd
= (insn
>> 12) & 0xf;
1738 rd0
= (insn
>> 0) & 0xf;
1739 rd1
= (insn
>> 16) & 0xf;
1740 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1741 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1742 gen_op_iwmmxt_setpsr_nz();
1743 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1744 gen_op_iwmmxt_set_mup();
1745 gen_op_iwmmxt_set_cup();
1747 case 0x111: /* TMRC */
1750 rd
= (insn
>> 12) & 0xf;
1751 wrd
= (insn
>> 16) & 0xf;
1752 tmp
= iwmmxt_load_creg(wrd
);
1753 store_reg(s
, rd
, tmp
);
1755 case 0x300: /* WANDN */
1756 wrd
= (insn
>> 12) & 0xf;
1757 rd0
= (insn
>> 0) & 0xf;
1758 rd1
= (insn
>> 16) & 0xf;
1759 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1760 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1761 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1762 gen_op_iwmmxt_setpsr_nz();
1763 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1764 gen_op_iwmmxt_set_mup();
1765 gen_op_iwmmxt_set_cup();
1767 case 0x200: /* WAND */
1768 wrd
= (insn
>> 12) & 0xf;
1769 rd0
= (insn
>> 0) & 0xf;
1770 rd1
= (insn
>> 16) & 0xf;
1771 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1772 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1773 gen_op_iwmmxt_setpsr_nz();
1774 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
2414 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2415 case 0x402: case 0x502: case 0x602: case 0x702:
2416 wrd
= (insn
>> 12) & 0xf;
2417 rd0
= (insn
>> 16) & 0xf;
2418 rd1
= (insn
>> 0) & 0xf;
2419 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2420 tmp
= tcg_const_i32((insn
>> 20) & 3);
2421 iwmmxt_load_reg(cpu_V1
, rd1
);
2422 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2423 tcg_temp_free_i32(tmp
);
2424 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2425 gen_op_iwmmxt_set_mup();
2427 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2428 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2429 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2430 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2431 wrd
= (insn
>> 12) & 0xf;
2432 rd0
= (insn
>> 16) & 0xf;
2433 rd1
= (insn
>> 0) & 0xf;
2434 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2435 switch ((insn
>> 20) & 0xf) {
2437 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2440 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2443 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2446 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2449 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2452 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2455 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2458 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2461 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2466 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2467 gen_op_iwmmxt_set_mup();
2468 gen_op_iwmmxt_set_cup();
2470 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2471 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2472 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2473 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2474 wrd
= (insn
>> 12) & 0xf;
2475 rd0
= (insn
>> 16) & 0xf;
2476 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2477 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2478 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2479 tcg_temp_free_i32(tmp
);
2480 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2484 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2485 case 0x418: case 0x518: case 0x618: case 0x718:
2486 case 0x818: case 0x918: case 0xa18: case 0xb18:
2487 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2488 wrd
= (insn
>> 12) & 0xf;
2489 rd0
= (insn
>> 16) & 0xf;
2490 rd1
= (insn
>> 0) & 0xf;
2491 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2492 switch ((insn
>> 20) & 0xf) {
2494 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2497 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2500 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2503 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2506 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2509 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2512 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2515 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2518 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2523 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2524 gen_op_iwmmxt_set_mup();
2525 gen_op_iwmmxt_set_cup();
2527 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2528 case 0x408: case 0x508: case 0x608: case 0x708:
2529 case 0x808: case 0x908: case 0xa08: case 0xb08:
2530 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2531 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2533 wrd
= (insn
>> 12) & 0xf;
2534 rd0
= (insn
>> 16) & 0xf;
2535 rd1
= (insn
>> 0) & 0xf;
2536 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2537 switch ((insn
>> 22) & 3) {
2539 if (insn
& (1 << 21))
2540 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2542 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2545 if (insn
& (1 << 21))
2546 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2548 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2551 if (insn
& (1 << 21))
2552 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2554 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2557 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2558 gen_op_iwmmxt_set_mup();
2559 gen_op_iwmmxt_set_cup();
2561 case 0x201: case 0x203: case 0x205: case 0x207:
2562 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2563 case 0x211: case 0x213: case 0x215: case 0x217:
2564 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2565 wrd
= (insn
>> 5) & 0xf;
2566 rd0
= (insn
>> 12) & 0xf;
2567 rd1
= (insn
>> 0) & 0xf;
2568 if (rd0
== 0xf || rd1
== 0xf)
2570 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2571 tmp
= load_reg(s
, rd0
);
2572 tmp2
= load_reg(s
, rd1
);
2573 switch ((insn
>> 16) & 0xf) {
2574 case 0x0: /* TMIA */
2575 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2577 case 0x8: /* TMIAPH */
2578 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2580 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2581 if (insn
& (1 << 16))
2582 tcg_gen_shri_i32(tmp
, tmp
, 16);
2583 if (insn
& (1 << 17))
2584 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2585 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2588 tcg_temp_free_i32(tmp2
);
2589 tcg_temp_free_i32(tmp
);
2592 tcg_temp_free_i32(tmp2
);
2593 tcg_temp_free_i32(tmp
);
2594 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2595 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
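/*
 * Illustrative note (added, not in the original source): the XScale internal
 * accumulator acc0 is 40 bits wide.  MRA therefore returns the low 32 bits in
 * RdLo and only bits [39:32] in RdHi, which is why the RdHi result above is
 * masked with (1 << (40 - 32)) - 1 == 0xff.  MAR simply concatenates RdLo and
 * RdHi back into a 64-bit TCG value before storing it to the accumulator.
 */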
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
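/*
 * Worked example (added for clarity, not part of the original source): for a
 * destination operand encoded as Vd = insn[15:12] with extra bit D = insn[22],
 * VFP_DREG_D(reg, insn) produces the double-precision number D:Vd (0..31)
 * when VFP3's 32 D registers are present, and treats a set D bit as an
 * illegal encoding otherwise.  VFP_SREG_D(insn) instead produces the
 * single-precision numbering Vd:D (0..31).
 */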
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
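/*
 * Illustrative example (not in the original source): for size == 0 the dup
 * helpers above turn a loaded byte 0x000000ab into 0xabababab, and for
 * size == 1 a halfword 0x0000abcd becomes 0xabcdabcd, so a single loaded
 * element can be stored unchanged to both 32-bit halves of a D register.
 */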
2761 static int handle_vsel(uint32_t insn
, uint32_t rd
, uint32_t rn
, uint32_t rm
,
2764 uint32_t cc
= extract32(insn
, 20, 2);
2767 TCGv_i64 frn
, frm
, dest
;
2768 TCGv_i64 tmp
, zero
, zf
, nf
, vf
;
2770 zero
= tcg_const_i64(0);
2772 frn
= tcg_temp_new_i64();
2773 frm
= tcg_temp_new_i64();
2774 dest
= tcg_temp_new_i64();
2776 zf
= tcg_temp_new_i64();
2777 nf
= tcg_temp_new_i64();
2778 vf
= tcg_temp_new_i64();
2780 tcg_gen_extu_i32_i64(zf
, cpu_ZF
);
2781 tcg_gen_ext_i32_i64(nf
, cpu_NF
);
2782 tcg_gen_ext_i32_i64(vf
, cpu_VF
);
2784 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2785 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2788 tcg_gen_movcond_i64(TCG_COND_EQ
, dest
, zf
, zero
,
2792 tcg_gen_movcond_i64(TCG_COND_LT
, dest
, vf
, zero
,
2795 case 2: /* ge: N == V -> N ^ V == 0 */
2796 tmp
= tcg_temp_new_i64();
2797 tcg_gen_xor_i64(tmp
, vf
, nf
);
2798 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2800 tcg_temp_free_i64(tmp
);
2802 case 3: /* gt: !Z && N == V */
2803 tcg_gen_movcond_i64(TCG_COND_NE
, dest
, zf
, zero
,
2805 tmp
= tcg_temp_new_i64();
2806 tcg_gen_xor_i64(tmp
, vf
, nf
);
2807 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2809 tcg_temp_free_i64(tmp
);
2812 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2813 tcg_temp_free_i64(frn
);
2814 tcg_temp_free_i64(frm
);
2815 tcg_temp_free_i64(dest
);
2817 tcg_temp_free_i64(zf
);
2818 tcg_temp_free_i64(nf
);
2819 tcg_temp_free_i64(vf
);
2821 tcg_temp_free_i64(zero
);
2823 TCGv_i32 frn
, frm
, dest
;
2826 zero
= tcg_const_i32(0);
2828 frn
= tcg_temp_new_i32();
2829 frm
= tcg_temp_new_i32();
2830 dest
= tcg_temp_new_i32();
2831 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2832 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2835 tcg_gen_movcond_i32(TCG_COND_EQ
, dest
, cpu_ZF
, zero
,
2839 tcg_gen_movcond_i32(TCG_COND_LT
, dest
, cpu_VF
, zero
,
2842 case 2: /* ge: N == V -> N ^ V == 0 */
2843 tmp
= tcg_temp_new_i32();
2844 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
2845 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
2847 tcg_temp_free_i32(tmp
);
2849 case 3: /* gt: !Z && N == V */
2850 tcg_gen_movcond_i32(TCG_COND_NE
, dest
, cpu_ZF
, zero
,
2852 tmp
= tcg_temp_new_i32();
2853 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
2854 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
2856 tcg_temp_free_i32(tmp
);
2859 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2860 tcg_temp_free_i32(frn
);
2861 tcg_temp_free_i32(frm
);
2862 tcg_temp_free_i32(dest
);
2864 tcg_temp_free_i32(zero
);
static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
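/*
 * Note added for clarity: VMINNM and VMAXNM follow the IEEE 754-2008
 * minNum and maxNum semantics implemented by the vfp_minnums/minnumd and
 * vfp_maxnums/maxnumd helpers, so when exactly one operand is a quiet NaN
 * the numerical operand is returned rather than the NaN.
 */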
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 0;
}
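/*
 * Illustrative decode example (not part of the original source): for the
 * VRINT{A,N,P,M} and VCVT{A,N,P,M} groups the RM field insn[17:16] indexes
 * fp_decode_rm, i.e. 00 selects ties-away (VRINTA/VCVTA), 01 ties-to-even
 * (VRINTN/VCVTN), 10 towards +infinity (VRINTP/VCVTP) and 11 towards
 * -infinity (VRINTM/VCVTM), matching the FPDecodeRM() pseudocode order.
 */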
3059 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3060 (ie. an undefined instruction). */
3061 static int disas_vfp_insn(DisasContext
*s
, uint32_t insn
)
3063 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
3069 if (!arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
3073 /* FIXME: this access check should not take precedence over UNDEF
3074 * for invalid encodings; we will generate incorrect syndrome information
3075 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3077 if (s
->fp_excp_el
) {
3078 gen_exception_insn(s
, 4, EXCP_UDEF
,
3079 syn_fp_access_trap(1, 0xe, s
->thumb
), s
->fp_excp_el
);
3083 if (!s
->vfp_enabled
) {
3084 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3085 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
3087 rn
= (insn
>> 16) & 0xf;
3088 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
&& rn
!= ARM_VFP_MVFR2
3089 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
) {
3094 if (extract32(insn
, 28, 4) == 0xf) {
3095 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3096 * only used in v8 and above.
3098 return disas_vfp_v8_insn(s
, insn
);
3101 dp
= ((insn
& 0xf00) == 0xb00);
3102 switch ((insn
>> 24) & 0xf) {
3104 if (insn
& (1 << 4)) {
3105 /* single register transfer */
3106 rd
= (insn
>> 12) & 0xf;
3111 VFP_DREG_N(rn
, insn
);
3114 if (insn
& 0x00c00060
3115 && !arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
3119 pass
= (insn
>> 21) & 1;
3120 if (insn
& (1 << 22)) {
3122 offset
= ((insn
>> 5) & 3) * 8;
3123 } else if (insn
& (1 << 5)) {
3125 offset
= (insn
& (1 << 6)) ? 16 : 0;
3130 if (insn
& ARM_CP_RW_BIT
) {
3132 tmp
= neon_load_reg(rn
, pass
);
3136 tcg_gen_shri_i32(tmp
, tmp
, offset
);
3137 if (insn
& (1 << 23))
3143 if (insn
& (1 << 23)) {
3145 tcg_gen_shri_i32(tmp
, tmp
, 16);
3151 tcg_gen_sari_i32(tmp
, tmp
, 16);
3160 store_reg(s
, rd
, tmp
);
3163 tmp
= load_reg(s
, rd
);
3164 if (insn
& (1 << 23)) {
3167 gen_neon_dup_u8(tmp
, 0);
3168 } else if (size
== 1) {
3169 gen_neon_dup_low16(tmp
);
3171 for (n
= 0; n
<= pass
* 2; n
++) {
3172 tmp2
= tcg_temp_new_i32();
3173 tcg_gen_mov_i32(tmp2
, tmp
);
3174 neon_store_reg(rn
, n
, tmp2
);
3176 neon_store_reg(rn
, n
, tmp
);
3181 tmp2
= neon_load_reg(rn
, pass
);
3182 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
3183 tcg_temp_free_i32(tmp2
);
3186 tmp2
= neon_load_reg(rn
, pass
);
3187 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
3188 tcg_temp_free_i32(tmp2
);
3193 neon_store_reg(rn
, pass
, tmp
);
3197 if ((insn
& 0x6f) != 0x00)
3199 rn
= VFP_SREG_N(insn
);
3200 if (insn
& ARM_CP_RW_BIT
) {
3202 if (insn
& (1 << 21)) {
3203 /* system register */
3208 /* VFP2 allows access to FSID from userspace.
3209 VFP3 restricts all id registers to privileged
3212 && arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3215 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3220 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3222 case ARM_VFP_FPINST
:
3223 case ARM_VFP_FPINST2
:
3224 /* Not present in VFP3. */
3226 || arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3229 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3233 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
3234 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
3236 tmp
= tcg_temp_new_i32();
3237 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
3241 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
3248 || !arm_dc_feature(s
, ARM_FEATURE_MVFR
)) {
3251 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3257 gen_mov_F0_vreg(0, rn
);
3258 tmp
= gen_vfp_mrs();
3261 /* Set the 4 flag bits in the CPSR. */
3263 tcg_temp_free_i32(tmp
);
3265 store_reg(s
, rd
, tmp
);
3269 if (insn
& (1 << 21)) {
3271 /* system register */
3276 /* Writes are ignored. */
3279 tmp
= load_reg(s
, rd
);
3280 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
3281 tcg_temp_free_i32(tmp
);
3287 /* TODO: VFP subarchitecture support.
3288 * For now, keep the EN bit only */
3289 tmp
= load_reg(s
, rd
);
3290 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
3291 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3294 case ARM_VFP_FPINST
:
3295 case ARM_VFP_FPINST2
:
3299 tmp
= load_reg(s
, rd
);
3300 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3306 tmp
= load_reg(s
, rd
);
3308 gen_mov_vreg_F0(0, rn
);
3313 /* data processing */
3314 /* The opcode is in bits 23, 21, 20 and 6. */
3315 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
3319 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
3321 /* rn is register number */
3322 VFP_DREG_N(rn
, insn
);
3325 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18) ||
3326 ((rn
& 0x1e) == 0x6))) {
3327 /* Integer or single/half precision destination. */
3328 rd
= VFP_SREG_D(insn
);
3330 VFP_DREG_D(rd
, insn
);
3333 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14) ||
3334 ((rn
& 0x1e) == 0x4))) {
3335 /* VCVT from int or half precision is always from S reg
3336 * regardless of dp bit. VCVT with immediate frac_bits
3337 * has same format as SREG_M.
3339 rm
= VFP_SREG_M(insn
);
3341 VFP_DREG_M(rm
, insn
);
3344 rn
= VFP_SREG_N(insn
);
3345 if (op
== 15 && rn
== 15) {
3346 /* Double precision destination. */
3347 VFP_DREG_D(rd
, insn
);
3349 rd
= VFP_SREG_D(insn
);
3351 /* NB that we implicitly rely on the encoding for the frac_bits
3352 * in VCVT of fixed to float being the same as that of an SREG_M
3354 rm
= VFP_SREG_M(insn
);
3357 veclen
= s
->vec_len
;
3358 if (op
== 15 && rn
> 3)
3361 /* Shut up compiler warnings. */
3372 /* Figure out what type of vector operation this is. */
3373 if ((rd
& bank_mask
) == 0) {
3378 delta_d
= (s
->vec_stride
>> 1) + 1;
3380 delta_d
= s
->vec_stride
+ 1;
3382 if ((rm
& bank_mask
) == 0) {
3383 /* mixed scalar/vector */
3392 /* Load the initial operands. */
3397 /* Integer source */
3398 gen_mov_F0_vreg(0, rm
);
3403 gen_mov_F0_vreg(dp
, rd
);
3404 gen_mov_F1_vreg(dp
, rm
);
3408 /* Compare with zero */
3409 gen_mov_F0_vreg(dp
, rd
);
3420 /* Source and destination the same. */
3421 gen_mov_F0_vreg(dp
, rd
);
3427 /* VCVTB, VCVTT: only present with the halfprec extension
3428 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3429 * (we choose to UNDEF)
3431 if ((dp
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) ||
3432 !arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
)) {
3435 if (!extract32(rn
, 1, 1)) {
3436 /* Half precision source. */
3437 gen_mov_F0_vreg(0, rm
);
3440 /* Otherwise fall through */
3442 /* One source operand. */
3443 gen_mov_F0_vreg(dp
, rm
);
3447 /* Two source operands. */
3448 gen_mov_F0_vreg(dp
, rn
);
3449 gen_mov_F1_vreg(dp
, rm
);
3453 /* Perform the calculation. */
3455 case 0: /* VMLA: fd + (fn * fm) */
3456 /* Note that order of inputs to the add matters for NaNs */
3458 gen_mov_F0_vreg(dp
, rd
);
3461 case 1: /* VMLS: fd + -(fn * fm) */
3464 gen_mov_F0_vreg(dp
, rd
);
3467 case 2: /* VNMLS: -fd + (fn * fm) */
3468 /* Note that it isn't valid to replace (-A + B) with (B - A)
3469 * or similar plausible looking simplifications
3470 * because this will give wrong results for NaNs.
3473 gen_mov_F0_vreg(dp
, rd
);
3477 case 3: /* VNMLA: -fd + -(fn * fm) */
3480 gen_mov_F0_vreg(dp
, rd
);
3484 case 4: /* mul: fn * fm */
3487 case 5: /* nmul: -(fn * fm) */
3491 case 6: /* add: fn + fm */
3494 case 7: /* sub: fn - fm */
3497 case 8: /* div: fn / fm */
3500 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3501 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3502 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3503 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3504 /* These are fused multiply-add, and must be done as one
3505 * floating point operation with no rounding between the
3506 * multiplication and addition steps.
3507 * NB that doing the negations here as separate steps is
3508 * correct : an input NaN should come out with its sign bit
3509 * flipped if it is a negated-input.
3511 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
3519 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3521 frd
= tcg_temp_new_i64();
3522 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3525 gen_helper_vfp_negd(frd
, frd
);
3527 fpst
= get_fpstatus_ptr(0);
3528 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3529 cpu_F1d
, frd
, fpst
);
3530 tcg_temp_free_ptr(fpst
);
3531 tcg_temp_free_i64(frd
);
3537 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3539 frd
= tcg_temp_new_i32();
3540 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3542 gen_helper_vfp_negs(frd
, frd
);
3544 fpst
= get_fpstatus_ptr(0);
3545 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3546 cpu_F1s
, frd
, fpst
);
3547 tcg_temp_free_ptr(fpst
);
3548 tcg_temp_free_i32(frd
);
3551 case 14: /* fconst */
3552 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3556 n
= (insn
<< 12) & 0x80000000;
3557 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3564 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3571 tcg_gen_movi_i32(cpu_F0s
, n
);
3574 case 15: /* extension space */
3588 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3589 tmp
= gen_vfp_mrs();
3590 tcg_gen_ext16u_i32(tmp
, tmp
);
3592 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3595 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3598 tcg_temp_free_i32(tmp
);
3600 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3601 tmp
= gen_vfp_mrs();
3602 tcg_gen_shri_i32(tmp
, tmp
, 16);
3604 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3607 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3610 tcg_temp_free_i32(tmp
);
3612 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3613 tmp
= tcg_temp_new_i32();
3615 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3618 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3621 gen_mov_F0_vreg(0, rd
);
3622 tmp2
= gen_vfp_mrs();
3623 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3624 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3625 tcg_temp_free_i32(tmp2
);
3628 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3629 tmp
= tcg_temp_new_i32();
3631 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3634 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3637 tcg_gen_shli_i32(tmp
, tmp
, 16);
3638 gen_mov_F0_vreg(0, rd
);
3639 tmp2
= gen_vfp_mrs();
3640 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3641 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3642 tcg_temp_free_i32(tmp2
);
3654 case 11: /* cmpez */
3658 case 12: /* vrintr */
3660 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3662 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3664 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3666 tcg_temp_free_ptr(fpst
);
3669 case 13: /* vrintz */
3671 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3673 tcg_rmode
= tcg_const_i32(float_round_to_zero
);
3674 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3676 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3678 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3680 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3681 tcg_temp_free_i32(tcg_rmode
);
3682 tcg_temp_free_ptr(fpst
);
3685 case 14: /* vrintx */
3687 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3689 gen_helper_rintd_exact(cpu_F0d
, cpu_F0d
, fpst
);
3691 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpst
);
3693 tcg_temp_free_ptr(fpst
);
3696 case 15: /* single<->double conversion */
3698 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3700 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3702 case 16: /* fuito */
3703 gen_vfp_uito(dp
, 0);
3705 case 17: /* fsito */
3706 gen_vfp_sito(dp
, 0);
3708 case 20: /* fshto */
3709 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3712 gen_vfp_shto(dp
, 16 - rm
, 0);
3714 case 21: /* fslto */
3715 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3718 gen_vfp_slto(dp
, 32 - rm
, 0);
3720 case 22: /* fuhto */
3721 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3724 gen_vfp_uhto(dp
, 16 - rm
, 0);
3726 case 23: /* fulto */
3727 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3730 gen_vfp_ulto(dp
, 32 - rm
, 0);
3732 case 24: /* ftoui */
3733 gen_vfp_toui(dp
, 0);
3735 case 25: /* ftouiz */
3736 gen_vfp_touiz(dp
, 0);
3738 case 26: /* ftosi */
3739 gen_vfp_tosi(dp
, 0);
3741 case 27: /* ftosiz */
3742 gen_vfp_tosiz(dp
, 0);
3744 case 28: /* ftosh */
3745 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3748 gen_vfp_tosh(dp
, 16 - rm
, 0);
3750 case 29: /* ftosl */
3751 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3754 gen_vfp_tosl(dp
, 32 - rm
, 0);
3756 case 30: /* ftouh */
3757 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3760 gen_vfp_touh(dp
, 16 - rm
, 0);
3762 case 31: /* ftoul */
3763 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3766 gen_vfp_toul(dp
, 32 - rm
, 0);
3768 default: /* undefined */
3772 default: /* undefined */
3776 /* Write back the result. */
3777 if (op
== 15 && (rn
>= 8 && rn
<= 11)) {
3778 /* Comparison, do nothing. */
3779 } else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18 ||
3780 (rn
& 0x1e) == 0x6)) {
3781 /* VCVT double to int: always integer result.
3782 * VCVT double to half precision is always a single
3785 gen_mov_vreg_F0(0, rd
);
3786 } else if (op
== 15 && rn
== 15) {
3788 gen_mov_vreg_F0(!dp
, rd
);
3790 gen_mov_vreg_F0(dp
, rd
);
3793 /* break out of the loop if we have finished */
3797 if (op
== 15 && delta_m
== 0) {
3798 /* single source one-many */
3800 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3802 gen_mov_vreg_F0(dp
, rd
);
3806 /* Setup the next operands. */
3808 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3812 /* One source operand. */
3813 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3815 gen_mov_F0_vreg(dp
, rm
);
3817 /* Two source operands. */
3818 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3820 gen_mov_F0_vreg(dp
, rn
);
3822 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3824 gen_mov_F1_vreg(dp
, rm
);
3832 if ((insn
& 0x03e00000) == 0x00400000) {
3833 /* two-register transfer */
3834 rn
= (insn
>> 16) & 0xf;
3835 rd
= (insn
>> 12) & 0xf;
3837 VFP_DREG_M(rm
, insn
);
3839 rm
= VFP_SREG_M(insn
);
3842 if (insn
& ARM_CP_RW_BIT
) {
3845 gen_mov_F0_vreg(0, rm
* 2);
3846 tmp
= gen_vfp_mrs();
3847 store_reg(s
, rd
, tmp
);
3848 gen_mov_F0_vreg(0, rm
* 2 + 1);
3849 tmp
= gen_vfp_mrs();
3850 store_reg(s
, rn
, tmp
);
3852 gen_mov_F0_vreg(0, rm
);
3853 tmp
= gen_vfp_mrs();
3854 store_reg(s
, rd
, tmp
);
3855 gen_mov_F0_vreg(0, rm
+ 1);
3856 tmp
= gen_vfp_mrs();
3857 store_reg(s
, rn
, tmp
);
3862 tmp
= load_reg(s
, rd
);
3864 gen_mov_vreg_F0(0, rm
* 2);
3865 tmp
= load_reg(s
, rn
);
3867 gen_mov_vreg_F0(0, rm
* 2 + 1);
3869 tmp
= load_reg(s
, rd
);
3871 gen_mov_vreg_F0(0, rm
);
3872 tmp
= load_reg(s
, rn
);
3874 gen_mov_vreg_F0(0, rm
+ 1);
3879 rn
= (insn
>> 16) & 0xf;
3881 VFP_DREG_D(rd
, insn
);
3883 rd
= VFP_SREG_D(insn
);
3884 if ((insn
& 0x01200000) == 0x01000000) {
3885 /* Single load/store */
3886 offset
= (insn
& 0xff) << 2;
3887 if ((insn
& (1 << 23)) == 0)
3889 if (s
->thumb
&& rn
== 15) {
3890 /* This is actually UNPREDICTABLE */
3891 addr
= tcg_temp_new_i32();
3892 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3894 addr
= load_reg(s
, rn
);
3896 tcg_gen_addi_i32(addr
, addr
, offset
);
3897 if (insn
& (1 << 20)) {
3898 gen_vfp_ld(s
, dp
, addr
);
3899 gen_mov_vreg_F0(dp
, rd
);
3901 gen_mov_F0_vreg(dp
, rd
);
3902 gen_vfp_st(s
, dp
, addr
);
3904 tcg_temp_free_i32(addr
);
3906 /* load/store multiple */
3907 int w
= insn
& (1 << 21);
3909 n
= (insn
>> 1) & 0x7f;
3913 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3914 /* P == U , W == 1 => UNDEF */
3917 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3918 /* UNPREDICTABLE cases for bad immediates: we choose to
3919 * UNDEF to avoid generating huge numbers of TCG ops
3923 if (rn
== 15 && w
) {
3924 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3928 if (s
->thumb
&& rn
== 15) {
3929 /* This is actually UNPREDICTABLE */
3930 addr
= tcg_temp_new_i32();
3931 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3933 addr
= load_reg(s
, rn
);
3935 if (insn
& (1 << 24)) /* pre-decrement */
3936 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3942 for (i
= 0; i
< n
; i
++) {
3943 if (insn
& ARM_CP_RW_BIT
) {
3945 gen_vfp_ld(s
, dp
, addr
);
3946 gen_mov_vreg_F0(dp
, rd
+ i
);
3949 gen_mov_F0_vreg(dp
, rd
+ i
);
3950 gen_vfp_st(s
, dp
, addr
);
3952 tcg_gen_addi_i32(addr
, addr
, offset
);
3956 if (insn
& (1 << 24))
3957 offset
= -offset
* n
;
3958 else if (dp
&& (insn
& 1))
3964 tcg_gen_addi_i32(addr
, addr
, offset
);
3965 store_reg(s
, rn
, addr
);
3967 tcg_temp_free_i32(addr
);
3973 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled || s->ss_active)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
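/*
 * Illustrative example (not in the original source): gen_mulxy implements the
 * halfword-select step used by the SMUL<x><y>-style signed multiplies.  When
 * x is set, the top 16 bits of t0 are used (arithmetic shift right by 16),
 * otherwise the sign-extended bottom half; likewise y selects the half of t1.
 * The two selected halfwords are then multiplied into a 32-bit result in t0.
 */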
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
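/*
 * Worked example (added for clarity): MSR CPSR_fc, Rm encodes the field mask
 * as flags = 0b1001, so the raw mask here is 0xff0000ff (flags byte plus
 * control byte) before the feature-dependent and mode-dependent bits are
 * filtered out above.
 */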
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, CPSR_ERET_MASK);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_JUMP;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, CPSR_ERET_MASK);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_JUMP;
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
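/*
 * Expansion example (not in the original source): with size == 1 and u == 0,
 * GEN_NEON_INTEGER_OP(hadd) selects case 2 and emits
 * gen_helper_neon_hadd_s16(tmp, tmp, tmp2); an out-of-range size/u
 * combination makes the enclosing decoder return 1, i.e. UNDEF.
 */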
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
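/*
 * Illustrative note (not in the original source): for a 16-bit scalar the low
 * three bits of 'reg' select the D register and the remaining bits select
 * which of its four halfword elements to take; the chosen halfword is then
 * duplicated into both halves of the returned 32-bit value so the by-scalar
 * multiply paths can use it directly.
 */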
4236 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
4239 if (!q
&& size
== 2) {
4242 tmp
= tcg_const_i32(rd
);
4243 tmp2
= tcg_const_i32(rm
);
4247 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
4250 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
4253 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
4261 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
4264 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
4270 tcg_temp_free_i32(tmp
);
4271 tcg_temp_free_i32(tmp2
);
4275 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
4278 if (!q
&& size
== 2) {
4281 tmp
= tcg_const_i32(rd
);
4282 tmp2
= tcg_const_i32(rm
);
4286 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
4289 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
4292 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
4300 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
4303 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
4309 tcg_temp_free_i32(tmp
);
4310 tcg_temp_free_i32(tmp2
);
4314 static void gen_neon_trn_u8(TCGv_i32 t0
, TCGv_i32 t1
)
4318 rd
= tcg_temp_new_i32();
4319 tmp
= tcg_temp_new_i32();
4321 tcg_gen_shli_i32(rd
, t0
, 8);
4322 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
4323 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
4324 tcg_gen_or_i32(rd
, rd
, tmp
);
4326 tcg_gen_shri_i32(t1
, t1
, 8);
4327 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
4328 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
4329 tcg_gen_or_i32(t1
, t1
, tmp
);
4330 tcg_gen_mov_i32(t0
, rd
);
4332 tcg_temp_free_i32(tmp
);
4333 tcg_temp_free_i32(rd
);
4336 static void gen_neon_trn_u16(TCGv_i32 t0
, TCGv_i32 t1
)
4340 rd
= tcg_temp_new_i32();
4341 tmp
= tcg_temp_new_i32();
4343 tcg_gen_shli_i32(rd
, t0
, 16);
4344 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
4345 tcg_gen_or_i32(rd
, rd
, tmp
);
4346 tcg_gen_shri_i32(t1
, t1
, 16);
4347 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
4348 tcg_gen_or_i32(t1
, t1
, tmp
);
4349 tcg_gen_mov_i32(t0
, rd
);
4351 tcg_temp_free_i32(tmp
);
4352 tcg_temp_free_i32(rd
);
4360 } neon_ls_element_type
[11] = {
4374 /* Translate a NEON load/store element instruction. Return nonzero if the
4375 instruction is invalid. */
4376 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
4395 /* FIXME: this access check should not take precedence over UNDEF
4396 * for invalid encodings; we will generate incorrect syndrome information
4397 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4399 if (s
->fp_excp_el
) {
4400 gen_exception_insn(s
, 4, EXCP_UDEF
,
4401 syn_fp_access_trap(1, 0xe, s
->thumb
), s
->fp_excp_el
);
4405 if (!s
->vfp_enabled
)
4407 VFP_DREG_D(rd
, insn
);
4408 rn
= (insn
>> 16) & 0xf;
4410 load
= (insn
& (1 << 21)) != 0;
4411 if ((insn
& (1 << 23)) == 0) {
4412 /* Load store all elements. */
4413 op
= (insn
>> 8) & 0xf;
4414 size
= (insn
>> 6) & 3;
4417 /* Catch UNDEF cases for bad values of align field */
4420 if (((insn
>> 5) & 1) == 1) {
4425 if (((insn
>> 4) & 3) == 3) {
4432 nregs
= neon_ls_element_type
[op
].nregs
;
4433 interleave
= neon_ls_element_type
[op
].interleave
;
4434 spacing
= neon_ls_element_type
[op
].spacing
;
4435 if (size
== 3 && (interleave
| spacing
) != 1)
4437 addr
= tcg_temp_new_i32();
4438 load_reg_var(s
, addr
, rn
);
4439 stride
= (1 << size
) * interleave
;
4440 for (reg
= 0; reg
< nregs
; reg
++) {
4441 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
4442 load_reg_var(s
, addr
, rn
);
4443 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
4444 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
4445 load_reg_var(s
, addr
, rn
);
4446 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4449 tmp64
= tcg_temp_new_i64();
4451 gen_aa32_ld64(tmp64
, addr
, get_mem_index(s
));
4452 neon_store_reg64(tmp64
, rd
);
4454 neon_load_reg64(tmp64
, rd
);
4455 gen_aa32_st64(tmp64
, addr
, get_mem_index(s
));
4457 tcg_temp_free_i64(tmp64
);
4458 tcg_gen_addi_i32(addr
, addr
, stride
);
4460 for (pass
= 0; pass
< 2; pass
++) {
4463 tmp
= tcg_temp_new_i32();
4464 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
4465 neon_store_reg(rd
, pass
, tmp
);
4467 tmp
= neon_load_reg(rd
, pass
);
4468 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
4469 tcg_temp_free_i32(tmp
);
4471 tcg_gen_addi_i32(addr
, addr
, stride
);
4472 } else if (size
== 1) {
4474 tmp
= tcg_temp_new_i32();
4475 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
4476 tcg_gen_addi_i32(addr
, addr
, stride
);
4477 tmp2
= tcg_temp_new_i32();
4478 gen_aa32_ld16u(tmp2
, addr
, get_mem_index(s
));
4479 tcg_gen_addi_i32(addr
, addr
, stride
);
4480 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
4481 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4482 tcg_temp_free_i32(tmp2
);
4483 neon_store_reg(rd
, pass
, tmp
);
4485 tmp
= neon_load_reg(rd
, pass
);
4486 tmp2
= tcg_temp_new_i32();
4487 tcg_gen_shri_i32(tmp2
, tmp
, 16);
4488 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
4489 tcg_temp_free_i32(tmp
);
4490 tcg_gen_addi_i32(addr
, addr
, stride
);
4491 gen_aa32_st16(tmp2
, addr
, get_mem_index(s
));
4492 tcg_temp_free_i32(tmp2
);
4493 tcg_gen_addi_i32(addr
, addr
, stride
);
4495 } else /* size == 0 */ {
4497 TCGV_UNUSED_I32(tmp2
);
4498 for (n
= 0; n
< 4; n
++) {
4499 tmp
= tcg_temp_new_i32();
4500 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
4501 tcg_gen_addi_i32(addr
, addr
, stride
);
4505 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
4506 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
4507 tcg_temp_free_i32(tmp
);
4510 neon_store_reg(rd
, pass
, tmp2
);
4512 tmp2
= neon_load_reg(rd
, pass
);
4513 for (n
= 0; n
< 4; n
++) {
4514 tmp
= tcg_temp_new_i32();
4516 tcg_gen_mov_i32(tmp
, tmp2
);
4518 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
4520 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
4521 tcg_temp_free_i32(tmp
);
4522 tcg_gen_addi_i32(addr
, addr
, stride
);
4524 tcg_temp_free_i32(tmp2
);
4531 tcg_temp_free_i32(addr
);
4534 size
= (insn
>> 10) & 3;
4536 /* Load single element to all lanes. */
4537 int a
= (insn
>> 4) & 1;
4541 size
= (insn
>> 6) & 3;
4542 nregs
= ((insn
>> 8) & 3) + 1;
4545 if (nregs
!= 4 || a
== 0) {
4548 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4551 if (nregs
== 1 && a
== 1 && size
== 0) {
4554 if (nregs
== 3 && a
== 1) {
4557 addr
= tcg_temp_new_i32();
4558 load_reg_var(s
, addr
, rn
);
4560 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4561 tmp
= gen_load_and_replicate(s
, addr
, size
);
4562 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4563 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4564 if (insn
& (1 << 5)) {
4565 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
4566 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
4568 tcg_temp_free_i32(tmp
);
4570 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4571 stride
= (insn
& (1 << 5)) ? 2 : 1;
4572 for (reg
= 0; reg
< nregs
; reg
++) {
4573 tmp
= gen_load_and_replicate(s
, addr
, size
);
4574 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4575 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4576 tcg_temp_free_i32(tmp
);
4577 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4581 tcg_temp_free_i32(addr
);
4582 stride
= (1 << size
) * nregs
;
4584 /* Single element. */
4585 int idx
= (insn
>> 4) & 0xf;
4586 pass
= (insn
>> 7) & 1;
4589 shift
= ((insn
>> 5) & 3) * 8;
4593 shift
= ((insn
>> 6) & 1) * 16;
4594 stride
= (insn
& (1 << 5)) ? 2 : 1;
4598 stride
= (insn
& (1 << 6)) ? 2 : 1;
4603 nregs
= ((insn
>> 8) & 3) + 1;
4604 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4607 if (((idx
& (1 << size
)) != 0) ||
4608 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
4613 if ((idx
& 1) != 0) {
4618 if (size
== 2 && (idx
& 2) != 0) {
4623 if ((size
== 2) && ((idx
& 3) == 3)) {
4630 if ((rd
+ stride
* (nregs
- 1)) > 31) {
4631 /* Attempts to write off the end of the register file
4632 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4633 * the neon_load_reg() would write off the end of the array.
4637 addr
= tcg_temp_new_i32();
4638 load_reg_var(s
, addr
, rn
);
4639 for (reg
= 0; reg
< nregs
; reg
++) {
4641 tmp
= tcg_temp_new_i32();
4644 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
4647 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
4650 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
4652 default: /* Avoid compiler warnings. */
4656 tmp2
= neon_load_reg(rd
, pass
);
4657 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
4658 shift
, size
? 16 : 8);
4659 tcg_temp_free_i32(tmp2
);
4661 neon_store_reg(rd
, pass
, tmp
);
4662 } else { /* Store */
4663 tmp
= neon_load_reg(rd
, pass
);
4665 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4668 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
4671 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
4674 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
4677 tcg_temp_free_i32(tmp
);
4680 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4682 tcg_temp_free_i32(addr
);
4683 stride
= nregs
* (1 << size
);
4689 base
= load_reg(s
, rn
);
4691 tcg_gen_addi_i32(base
, base
, stride
);
4694 index
= load_reg(s
, rm
);
4695 tcg_gen_add_i32(base
, base
, index
);
4696 tcg_temp_free_i32(index
);
4698 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
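/*
 * Example (added for clarity): gen_neon_bsl computes
 * dest = (t & c) | (f & ~c), i.e. each bit of c chooses the corresponding bit
 * from t when set and from f when clear, which is exactly the VBSL
 * bitwise-select semantics.
 */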
4711 static inline void gen_neon_narrow(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4714 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
4715 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
4716 case 2: tcg_gen_extrl_i64_i32(dest
, src
); break;
4721 static inline void gen_neon_narrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4724 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
4725 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
4726 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
4731 static inline void gen_neon_narrow_satu(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4734 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
4735 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
4736 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
4741 static inline void gen_neon_unarrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4744 case 0: gen_helper_neon_unarrow_sat8(dest
, cpu_env
, src
); break;
4745 case 1: gen_helper_neon_unarrow_sat16(dest
, cpu_env
, src
); break;
4746 case 2: gen_helper_neon_unarrow_sat32(dest
, cpu_env
, src
); break;
4751 static inline void gen_neon_shift_narrow(int size
, TCGv_i32 var
, TCGv_i32 shift
,
4757 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4758 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4763 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4764 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4771 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
4772 case 2: gen_helper_neon_shl_u32(var
, var
, shift
); break;
4777 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4778 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
4785 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv_i32 src
, int size
, int u
)
4789 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
4790 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
4791 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
4796 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
4797 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
4798 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
4802 tcg_temp_free_i32(src
);
4805 static inline void gen_neon_addl(int size
)
4808 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
4809 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
4810 case 2: tcg_gen_add_i64(CPU_V001
); break;
4815 static inline void gen_neon_subl(int size
)
4818 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
4819 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
4820 case 2: tcg_gen_sub_i64(CPU_V001
); break;
4825 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
4828 case 0: gen_helper_neon_negl_u16(var
, var
); break;
4829 case 1: gen_helper_neon_negl_u32(var
, var
); break;
4831 tcg_gen_neg_i64(var
, var
);
4837 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
4840 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
4841 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
4846 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv_i32 a
, TCGv_i32 b
,
4851 switch ((size
<< 1) | u
) {
4852 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
4853 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
4854 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
4855 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
4857 tmp
= gen_muls_i64_i32(a
, b
);
4858 tcg_gen_mov_i64(dest
, tmp
);
4859 tcg_temp_free_i64(tmp
);
4862 tmp
= gen_mulu_i64_i32(a
, b
);
4863 tcg_gen_mov_i64(dest
, tmp
);
4864 tcg_temp_free_i64(tmp
);
4869 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4870 Don't forget to clean them now. */
4872 tcg_temp_free_i32(a
);
4873 tcg_temp_free_i32(b
);
4877 static void gen_neon_narrow_op(int op
, int u
, int size
,
4878 TCGv_i32 dest
, TCGv_i64 src
)
4882 gen_neon_unarrow_sats(size
, dest
, src
);
4884 gen_neon_narrow(size
, dest
, src
);
4888 gen_neon_narrow_satu(size
, dest
, src
);
4890 gen_neon_narrow_sats(size
, dest
, src
);
4895 /* Symbolic constants for op fields for Neon 3-register same-length.
4896 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4899 #define NEON_3R_VHADD 0
4900 #define NEON_3R_VQADD 1
4901 #define NEON_3R_VRHADD 2
4902 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4903 #define NEON_3R_VHSUB 4
4904 #define NEON_3R_VQSUB 5
4905 #define NEON_3R_VCGT 6
4906 #define NEON_3R_VCGE 7
4907 #define NEON_3R_VSHL 8
4908 #define NEON_3R_VQSHL 9
4909 #define NEON_3R_VRSHL 10
4910 #define NEON_3R_VQRSHL 11
4911 #define NEON_3R_VMAX 12
4912 #define NEON_3R_VMIN 13
4913 #define NEON_3R_VABD 14
4914 #define NEON_3R_VABA 15
4915 #define NEON_3R_VADD_VSUB 16
4916 #define NEON_3R_VTST_VCEQ 17
4917 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4918 #define NEON_3R_VMUL 19
4919 #define NEON_3R_VPMAX 20
4920 #define NEON_3R_VPMIN 21
4921 #define NEON_3R_VQDMULH_VQRDMULH 22
4922 #define NEON_3R_VPADD 23
4923 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4924 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4925 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4926 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4927 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4928 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4929 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4930 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4967 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4968 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
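/*
 * For reference: the op value used with these NEON_2RM_* constants is
 * assembled by the two-register-misc decode in disas_neon_data_insn()
 * further down as
 *
 *     op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
 *
 * so insn bits [17:16] become op[5:4] and insn bits [10:7] become op[3:0].
 */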
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
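/*
 * Illustrative only: a minimal sketch of how the size bitmaps above are
 * meant to be consulted. The decoder below performs the same test inline;
 * this helper (and its name) is ours, not part of the original code.
 */
static inline bool neon_2rm_size_ok(int op, int size)
{
    /* Bit 'size' of the table entry is set iff this op/size pair is valid. */
    return (neon_2rm_sizes[op] & (1 << size)) != 0;
}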
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    uint32_t imm, mask;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        /*
         * The SHA-1/SHA-256 3-register instructions require special treatment
         * here, as their size field is overloaded as an op type selector, and
         * they all consume their input in a single pass.
         */
5169 if (op
== NEON_3R_SHA
) {
5173 if (!u
) { /* SHA-1 */
5174 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
5177 tmp
= tcg_const_i32(rd
);
5178 tmp2
= tcg_const_i32(rn
);
5179 tmp3
= tcg_const_i32(rm
);
5180 tmp4
= tcg_const_i32(size
);
5181 gen_helper_crypto_sha1_3reg(cpu_env
, tmp
, tmp2
, tmp3
, tmp4
);
5182 tcg_temp_free_i32(tmp4
);
5183 } else { /* SHA-256 */
5184 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
) || size
== 3) {
5187 tmp
= tcg_const_i32(rd
);
5188 tmp2
= tcg_const_i32(rn
);
5189 tmp3
= tcg_const_i32(rm
);
5192 gen_helper_crypto_sha256h(cpu_env
, tmp
, tmp2
, tmp3
);
5195 gen_helper_crypto_sha256h2(cpu_env
, tmp
, tmp2
, tmp3
);
5198 gen_helper_crypto_sha256su1(cpu_env
, tmp
, tmp2
, tmp3
);
5202 tcg_temp_free_i32(tmp
);
5203 tcg_temp_free_i32(tmp2
);
5204 tcg_temp_free_i32(tmp3
);
5207 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
5208 /* 64-bit element instructions. */
5209 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5210 neon_load_reg64(cpu_V0
, rn
+ pass
);
5211 neon_load_reg64(cpu_V1
, rm
+ pass
);
5215 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
5218 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
5224 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
5227 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
5233 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5235 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5240 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5243 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5249 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5251 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5254 case NEON_3R_VQRSHL
:
5256 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5259 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5263 case NEON_3R_VADD_VSUB
:
5265 tcg_gen_sub_i64(CPU_V001
);
5267 tcg_gen_add_i64(CPU_V001
);
5273 neon_store_reg64(cpu_V0
, rd
+ pass
);
5282 case NEON_3R_VQRSHL
:
5285 /* Shift instruction operands are reversed. */
5300 case NEON_3R_FLOAT_ARITH
:
5301 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5303 case NEON_3R_FLOAT_MINMAX
:
5304 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5306 case NEON_3R_FLOAT_CMP
:
5308 /* no encoding for U=0 C=1x */
5312 case NEON_3R_FLOAT_ACMP
:
5317 case NEON_3R_FLOAT_MISC
:
5318 /* VMAXNM/VMINNM in ARMv8 */
5319 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5324 if (u
&& (size
!= 0)) {
5325 /* UNDEF on invalid size for polynomial subcase */
5330 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
) || u
) {
5338 if (pairwise
&& q
) {
5339 /* All the pairwise insns UNDEF if Q is set */
5343 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5348 tmp
= neon_load_reg(rn
, 0);
5349 tmp2
= neon_load_reg(rn
, 1);
5351 tmp
= neon_load_reg(rm
, 0);
5352 tmp2
= neon_load_reg(rm
, 1);
5356 tmp
= neon_load_reg(rn
, pass
);
5357 tmp2
= neon_load_reg(rm
, pass
);
5361 GEN_NEON_INTEGER_OP(hadd
);
5364 GEN_NEON_INTEGER_OP_ENV(qadd
);
5366 case NEON_3R_VRHADD
:
5367 GEN_NEON_INTEGER_OP(rhadd
);
5369 case NEON_3R_LOGIC
: /* Logic ops. */
5370 switch ((u
<< 2) | size
) {
5372 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
5375 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
5378 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5381 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
5384 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
5387 tmp3
= neon_load_reg(rd
, pass
);
5388 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
5389 tcg_temp_free_i32(tmp3
);
5392 tmp3
= neon_load_reg(rd
, pass
);
5393 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
5394 tcg_temp_free_i32(tmp3
);
5397 tmp3
= neon_load_reg(rd
, pass
);
5398 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
5399 tcg_temp_free_i32(tmp3
);
5404 GEN_NEON_INTEGER_OP(hsub
);
5407 GEN_NEON_INTEGER_OP_ENV(qsub
);
5410 GEN_NEON_INTEGER_OP(cgt
);
5413 GEN_NEON_INTEGER_OP(cge
);
5416 GEN_NEON_INTEGER_OP(shl
);
5419 GEN_NEON_INTEGER_OP_ENV(qshl
);
5422 GEN_NEON_INTEGER_OP(rshl
);
5424 case NEON_3R_VQRSHL
:
5425 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5428 GEN_NEON_INTEGER_OP(max
);
5431 GEN_NEON_INTEGER_OP(min
);
5434 GEN_NEON_INTEGER_OP(abd
);
5437 GEN_NEON_INTEGER_OP(abd
);
5438 tcg_temp_free_i32(tmp2
);
5439 tmp2
= neon_load_reg(rd
, pass
);
5440 gen_neon_add(size
, tmp
, tmp2
);
5442 case NEON_3R_VADD_VSUB
:
5443 if (!u
) { /* VADD */
5444 gen_neon_add(size
, tmp
, tmp2
);
5447 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
5448 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
5449 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
5454 case NEON_3R_VTST_VCEQ
:
5455 if (!u
) { /* VTST */
5457 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
5458 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
5459 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
5464 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5465 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5466 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5471 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
5473 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5474 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5475 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5478 tcg_temp_free_i32(tmp2
);
5479 tmp2
= neon_load_reg(rd
, pass
);
5481 gen_neon_rsb(size
, tmp
, tmp2
);
5483 gen_neon_add(size
, tmp
, tmp2
);
5487 if (u
) { /* polynomial */
5488 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5489 } else { /* Integer */
5491 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5492 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5493 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5499 GEN_NEON_INTEGER_OP(pmax
);
5502 GEN_NEON_INTEGER_OP(pmin
);
5504 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5505 if (!u
) { /* VQDMULH */
5508 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5511 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5515 } else { /* VQRDMULH */
5518 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5521 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5529 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5530 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5531 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5535 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5537 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5538 switch ((u
<< 2) | size
) {
5541 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5544 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5547 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5552 tcg_temp_free_ptr(fpstatus
);
5555 case NEON_3R_FLOAT_MULTIPLY
:
5557 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5558 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5560 tcg_temp_free_i32(tmp2
);
5561 tmp2
= neon_load_reg(rd
, pass
);
5563 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5565 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5568 tcg_temp_free_ptr(fpstatus
);
5571 case NEON_3R_FLOAT_CMP
:
5573 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5575 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5578 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5580 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5583 tcg_temp_free_ptr(fpstatus
);
5586 case NEON_3R_FLOAT_ACMP
:
5588 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5590 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5592 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5594 tcg_temp_free_ptr(fpstatus
);
5597 case NEON_3R_FLOAT_MINMAX
:
5599 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5601 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5603 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5605 tcg_temp_free_ptr(fpstatus
);
5608 case NEON_3R_FLOAT_MISC
:
5611 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5613 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5615 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5617 tcg_temp_free_ptr(fpstatus
);
5620 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5622 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5628 /* VFMA, VFMS: fused multiply-add */
5629 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5630 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5633 gen_helper_vfp_negs(tmp
, tmp
);
5635 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5636 tcg_temp_free_i32(tmp3
);
5637 tcg_temp_free_ptr(fpstatus
);
5643 tcg_temp_free_i32(tmp2
);
5645 /* Save the result. For elementwise operations we can put it
5646 straight into the destination register. For pairwise operations
5647 we have to be careful to avoid clobbering the source operands. */
5648 if (pairwise
&& rd
== rm
) {
5649 neon_store_scratch(pass
, tmp
);
5651 neon_store_reg(rd
, pass
, tmp
);
5655 if (pairwise
&& rd
== rm
) {
5656 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5657 tmp
= neon_load_scratch(pass
);
5658 neon_store_reg(rd
, pass
, tmp
);
5661 /* End of 3 register same size operations. */
5662 } else if (insn
& (1 << 4)) {
5663 if ((insn
& 0x00380080) != 0) {
5664 /* Two registers and shift. */
5665 op
= (insn
>> 8) & 0xf;
5666 if (insn
& (1 << 7)) {
5674 while ((insn
& (1 << (size
+ 19))) == 0)
5677 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5678 /* To avoid excessive duplication of ops we implement shift
5679 by immediate using the variable shift operations. */
5681 /* Shift by immediate:
5682 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5683 if (q
&& ((rd
| rm
) & 1)) {
5686 if (!u
&& (op
== 4 || op
== 6)) {
5689 /* Right shifts are encoded as N - shift, where N is the
5690 element size in bits. */
5692 shift
= shift
- (1 << (size
+ 3));
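            /* Worked example (illustrative): with 8-bit elements (size == 0)
             * the element size N is 1 << (0 + 3) = 8, so an encoded shift
             * field of 5 becomes 5 - 8 = -3 here, i.e. a right shift by 3.
             */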
5700 imm
= (uint8_t) shift
;
5705 imm
= (uint16_t) shift
;
5716 for (pass
= 0; pass
< count
; pass
++) {
5718 neon_load_reg64(cpu_V0
, rm
+ pass
);
5719 tcg_gen_movi_i64(cpu_V1
, imm
);
5724 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5726 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5731 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5733 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5736 case 5: /* VSHL, VSLI */
5737 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5739 case 6: /* VQSHLU */
5740 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5745 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5748 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5753 if (op
== 1 || op
== 3) {
5755 neon_load_reg64(cpu_V1
, rd
+ pass
);
5756 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5757 } else if (op
== 4 || (op
== 5 && u
)) {
5759 neon_load_reg64(cpu_V1
, rd
+ pass
);
5761 if (shift
< -63 || shift
> 63) {
5765 mask
= 0xffffffffffffffffull
>> -shift
;
5767 mask
= 0xffffffffffffffffull
<< shift
;
5770 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5771 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5773 neon_store_reg64(cpu_V0
, rd
+ pass
);
5774 } else { /* size < 3 */
5775 /* Operands in T0 and T1. */
5776 tmp
= neon_load_reg(rm
, pass
);
5777 tmp2
= tcg_temp_new_i32();
5778 tcg_gen_movi_i32(tmp2
, imm
);
5782 GEN_NEON_INTEGER_OP(shl
);
5786 GEN_NEON_INTEGER_OP(rshl
);
5789 case 5: /* VSHL, VSLI */
5791 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5792 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5793 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5797 case 6: /* VQSHLU */
5800 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5804 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5808 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5816 GEN_NEON_INTEGER_OP_ENV(qshl
);
5819 tcg_temp_free_i32(tmp2
);
5821 if (op
== 1 || op
== 3) {
5823 tmp2
= neon_load_reg(rd
, pass
);
5824 gen_neon_add(size
, tmp
, tmp2
);
5825 tcg_temp_free_i32(tmp2
);
5826 } else if (op
== 4 || (op
== 5 && u
)) {
5831 mask
= 0xff >> -shift
;
5833 mask
= (uint8_t)(0xff << shift
);
5839 mask
= 0xffff >> -shift
;
5841 mask
= (uint16_t)(0xffff << shift
);
5845 if (shift
< -31 || shift
> 31) {
5849 mask
= 0xffffffffu
>> -shift
;
5851 mask
= 0xffffffffu
<< shift
;
5857 tmp2
= neon_load_reg(rd
, pass
);
5858 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5859 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5860 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5861 tcg_temp_free_i32(tmp2
);
5863 neon_store_reg(rd
, pass
, tmp
);
5866 } else if (op
< 10) {
5867 /* Shift by immediate and narrow:
5868 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5869 int input_unsigned
= (op
== 8) ? !u
: u
;
5873 shift
= shift
- (1 << (size
+ 3));
5876 tmp64
= tcg_const_i64(shift
);
5877 neon_load_reg64(cpu_V0
, rm
);
5878 neon_load_reg64(cpu_V1
, rm
+ 1);
5879 for (pass
= 0; pass
< 2; pass
++) {
5887 if (input_unsigned
) {
5888 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5890 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5893 if (input_unsigned
) {
5894 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5896 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5899 tmp
= tcg_temp_new_i32();
5900 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5901 neon_store_reg(rd
, pass
, tmp
);
5903 tcg_temp_free_i64(tmp64
);
5906 imm
= (uint16_t)shift
;
5910 imm
= (uint32_t)shift
;
5912 tmp2
= tcg_const_i32(imm
);
5913 tmp4
= neon_load_reg(rm
+ 1, 0);
5914 tmp5
= neon_load_reg(rm
+ 1, 1);
5915 for (pass
= 0; pass
< 2; pass
++) {
5917 tmp
= neon_load_reg(rm
, 0);
5921 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5924 tmp3
= neon_load_reg(rm
, 1);
5928 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5930 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5931 tcg_temp_free_i32(tmp
);
5932 tcg_temp_free_i32(tmp3
);
5933 tmp
= tcg_temp_new_i32();
5934 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5935 neon_store_reg(rd
, pass
, tmp
);
5937 tcg_temp_free_i32(tmp2
);
5939 } else if (op
== 10) {
5941 if (q
|| (rd
& 1)) {
5944 tmp
= neon_load_reg(rm
, 0);
5945 tmp2
= neon_load_reg(rm
, 1);
5946 for (pass
= 0; pass
< 2; pass
++) {
5950 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5953 /* The shift is less than the width of the source
5954 type, so we can just shift the whole register. */
5955 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5956 /* Widen the result of shift: we need to clear
5957 * the potential overflow bits resulting from
5958 * left bits of the narrow input appearing as
5959 * right bits of left the neighbour narrow
5961 if (size
< 2 || !u
) {
5964 imm
= (0xffu
>> (8 - shift
));
5966 } else if (size
== 1) {
5967 imm
= 0xffff >> (16 - shift
);
5970 imm
= 0xffffffff >> (32 - shift
);
5973 imm64
= imm
| (((uint64_t)imm
) << 32);
5977 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5980 neon_store_reg64(cpu_V0
, rd
+ pass
);
5982 } else if (op
>= 14) {
5983 /* VCVT fixed-point. */
5984 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5987 /* We have already masked out the must-be-1 top bit of imm6,
5988 * hence this 32-shift where the ARM ARM has 64-imm6.
5991 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5992 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5995 gen_vfp_ulto(0, shift
, 1);
5997 gen_vfp_slto(0, shift
, 1);
6000 gen_vfp_toul(0, shift
, 1);
6002 gen_vfp_tosl(0, shift
, 1);
6004 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
6009 } else { /* (insn & 0x00380080) == 0 */
6011 if (q
&& (rd
& 1)) {
6015 op
= (insn
>> 8) & 0xf;
6016 /* One register and immediate. */
6017 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
6018 invert
= (insn
& (1 << 5)) != 0;
6019 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6020 * We choose to not special-case this and will behave as if a
6021 * valid constant encoding of 0 had been given.
6040 imm
= (imm
<< 8) | (imm
<< 24);
6043 imm
= (imm
<< 8) | 0xff;
6046 imm
= (imm
<< 16) | 0xffff;
6049 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
6057 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
6058 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
6064 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6065 if (op
& 1 && op
< 12) {
6066 tmp
= neon_load_reg(rd
, pass
);
6068 /* The immediate value has already been inverted, so
6070 tcg_gen_andi_i32(tmp
, tmp
, imm
);
6072 tcg_gen_ori_i32(tmp
, tmp
, imm
);
6076 tmp
= tcg_temp_new_i32();
6077 if (op
== 14 && invert
) {
6081 for (n
= 0; n
< 4; n
++) {
6082 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
6083 val
|= 0xff << (n
* 8);
6085 tcg_gen_movi_i32(tmp
, val
);
6087 tcg_gen_movi_i32(tmp
, imm
);
6090 neon_store_reg(rd
, pass
, tmp
);
6093 } else { /* (insn & 0x00800010 == 0x00800000) */
6095 op
= (insn
>> 8) & 0xf;
6096 if ((insn
& (1 << 6)) == 0) {
6097 /* Three registers of different lengths. */
            int prewiden, src1_wide, src2_wide, undefreq;
            /* undefreq: bit 0 : UNDEF if size == 0
             * bit 1 : UNDEF if size == 1
             * bit 2 : UNDEF if size == 2
             * bit 3 : UNDEF if U == 1
             * Note that [2:0] set implies 'always UNDEF'
             */
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 9}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 9}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 1}, /* VQDMULL */
                {0, 0, 0, 0xa}, /* Polynomial VMULL */
                {0, 0, 0, 7}, /* Reserved: always UNDEF */
            };
6128 prewiden
= neon_3reg_wide
[op
][0];
6129 src1_wide
= neon_3reg_wide
[op
][1];
6130 src2_wide
= neon_3reg_wide
[op
][2];
6131 undefreq
= neon_3reg_wide
[op
][3];
6133 if ((undefreq
& (1 << size
)) ||
6134 ((undefreq
& 8) && u
)) {
6137 if ((src1_wide
&& (rn
& 1)) ||
6138 (src2_wide
&& (rm
& 1)) ||
6139 (!src2_wide
&& (rd
& 1))) {
6143 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6144 * outside the loop below as it only performs a single pass.
6146 if (op
== 14 && size
== 2) {
6147 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
6149 if (!arm_dc_feature(s
, ARM_FEATURE_V8_PMULL
)) {
6152 tcg_rn
= tcg_temp_new_i64();
6153 tcg_rm
= tcg_temp_new_i64();
6154 tcg_rd
= tcg_temp_new_i64();
6155 neon_load_reg64(tcg_rn
, rn
);
6156 neon_load_reg64(tcg_rm
, rm
);
6157 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
6158 neon_store_reg64(tcg_rd
, rd
);
6159 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
6160 neon_store_reg64(tcg_rd
, rd
+ 1);
6161 tcg_temp_free_i64(tcg_rn
);
6162 tcg_temp_free_i64(tcg_rm
);
6163 tcg_temp_free_i64(tcg_rd
);
6167 /* Avoid overlapping operands. Wide source operands are
6168 always aligned so will never overlap with wide
6169 destinations in problematic ways. */
6170 if (rd
== rm
&& !src2_wide
) {
6171 tmp
= neon_load_reg(rm
, 1);
6172 neon_store_scratch(2, tmp
);
6173 } else if (rd
== rn
&& !src1_wide
) {
6174 tmp
= neon_load_reg(rn
, 1);
6175 neon_store_scratch(2, tmp
);
6177 TCGV_UNUSED_I32(tmp3
);
6178 for (pass
= 0; pass
< 2; pass
++) {
6180 neon_load_reg64(cpu_V0
, rn
+ pass
);
6181 TCGV_UNUSED_I32(tmp
);
6183 if (pass
== 1 && rd
== rn
) {
6184 tmp
= neon_load_scratch(2);
6186 tmp
= neon_load_reg(rn
, pass
);
6189 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6193 neon_load_reg64(cpu_V1
, rm
+ pass
);
6194 TCGV_UNUSED_I32(tmp2
);
6196 if (pass
== 1 && rd
== rm
) {
6197 tmp2
= neon_load_scratch(2);
6199 tmp2
= neon_load_reg(rm
, pass
);
6202 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
6206 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6207 gen_neon_addl(size
);
6209 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6210 gen_neon_subl(size
);
6212 case 5: case 7: /* VABAL, VABDL */
6213 switch ((size
<< 1) | u
) {
6215 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
6218 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
6221 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
6224 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
6227 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
6230 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
6234 tcg_temp_free_i32(tmp2
);
6235 tcg_temp_free_i32(tmp
);
6237 case 8: case 9: case 10: case 11: case 12: case 13:
6238 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6239 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6241 case 14: /* Polynomial VMULL */
6242 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
6243 tcg_temp_free_i32(tmp2
);
6244 tcg_temp_free_i32(tmp
);
6246 default: /* 15 is RESERVED: caught earlier */
6251 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6252 neon_store_reg64(cpu_V0
, rd
+ pass
);
6253 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
6255 neon_load_reg64(cpu_V1
, rd
+ pass
);
6257 case 10: /* VMLSL */
6258 gen_neon_negl(cpu_V0
, size
);
6260 case 5: case 8: /* VABAL, VMLAL */
6261 gen_neon_addl(size
);
6263 case 9: case 11: /* VQDMLAL, VQDMLSL */
6264 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6266 gen_neon_negl(cpu_V0
, size
);
6268 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6273 neon_store_reg64(cpu_V0
, rd
+ pass
);
6274 } else if (op
== 4 || op
== 6) {
6275 /* Narrowing operation. */
6276 tmp
= tcg_temp_new_i32();
6280 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6283 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6286 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6287 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6294 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6297 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6300 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6301 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6302 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6310 neon_store_reg(rd
, 0, tmp3
);
6311 neon_store_reg(rd
, 1, tmp
);
6314 /* Write back the result. */
6315 neon_store_reg64(cpu_V0
, rd
+ pass
);
6319 /* Two registers and a scalar. NB that for ops of this form
6320 * the ARM ARM labels bit 24 as Q, but it is in our variable
6327 case 1: /* Float VMLA scalar */
6328 case 5: /* Floating point VMLS scalar */
6329 case 9: /* Floating point VMUL scalar */
6334 case 0: /* Integer VMLA scalar */
6335 case 4: /* Integer VMLS scalar */
6336 case 8: /* Integer VMUL scalar */
6337 case 12: /* VQDMULH scalar */
6338 case 13: /* VQRDMULH scalar */
6339 if (u
&& ((rd
| rn
) & 1)) {
6342 tmp
= neon_get_scalar(size
, rm
);
6343 neon_store_scratch(0, tmp
);
6344 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6345 tmp
= neon_load_scratch(0);
6346 tmp2
= neon_load_reg(rn
, pass
);
6349 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6351 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6353 } else if (op
== 13) {
6355 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6357 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6359 } else if (op
& 1) {
6360 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6361 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6362 tcg_temp_free_ptr(fpstatus
);
6365 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6366 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6367 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6371 tcg_temp_free_i32(tmp2
);
6374 tmp2
= neon_load_reg(rd
, pass
);
6377 gen_neon_add(size
, tmp
, tmp2
);
6381 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6382 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6383 tcg_temp_free_ptr(fpstatus
);
6387 gen_neon_rsb(size
, tmp
, tmp2
);
6391 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6392 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6393 tcg_temp_free_ptr(fpstatus
);
6399 tcg_temp_free_i32(tmp2
);
6401 neon_store_reg(rd
, pass
, tmp
);
6404 case 3: /* VQDMLAL scalar */
6405 case 7: /* VQDMLSL scalar */
6406 case 11: /* VQDMULL scalar */
6411 case 2: /* VMLAL sclar */
6412 case 6: /* VMLSL scalar */
6413 case 10: /* VMULL scalar */
6417 tmp2
= neon_get_scalar(size
, rm
);
6418 /* We need a copy of tmp2 because gen_neon_mull
6419 * deletes it during pass 0. */
6420 tmp4
= tcg_temp_new_i32();
6421 tcg_gen_mov_i32(tmp4
, tmp2
);
6422 tmp3
= neon_load_reg(rn
, 1);
6424 for (pass
= 0; pass
< 2; pass
++) {
6426 tmp
= neon_load_reg(rn
, 0);
6431 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6433 neon_load_reg64(cpu_V1
, rd
+ pass
);
6437 gen_neon_negl(cpu_V0
, size
);
6440 gen_neon_addl(size
);
6443 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6445 gen_neon_negl(cpu_V0
, size
);
6447 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6453 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6458 neon_store_reg64(cpu_V0
, rd
+ pass
);
6463 default: /* 14 and 15 are RESERVED */
6467 } else { /* size == 3 */
6470 imm
= (insn
>> 8) & 0xf;
6475 if (q
&& ((rd
| rn
| rm
) & 1)) {
6480 neon_load_reg64(cpu_V0
, rn
);
6482 neon_load_reg64(cpu_V1
, rn
+ 1);
6484 } else if (imm
== 8) {
6485 neon_load_reg64(cpu_V0
, rn
+ 1);
6487 neon_load_reg64(cpu_V1
, rm
);
6490 tmp64
= tcg_temp_new_i64();
6492 neon_load_reg64(cpu_V0
, rn
);
6493 neon_load_reg64(tmp64
, rn
+ 1);
6495 neon_load_reg64(cpu_V0
, rn
+ 1);
6496 neon_load_reg64(tmp64
, rm
);
6498 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6499 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6500 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6502 neon_load_reg64(cpu_V1
, rm
);
6504 neon_load_reg64(cpu_V1
, rm
+ 1);
6507 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6508 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6509 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6510 tcg_temp_free_i64(tmp64
);
6513 neon_load_reg64(cpu_V0
, rn
);
6514 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6515 neon_load_reg64(cpu_V1
, rm
);
6516 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6517 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6519 neon_store_reg64(cpu_V0
, rd
);
6521 neon_store_reg64(cpu_V1
, rd
+ 1);
6523 } else if ((insn
& (1 << 11)) == 0) {
6524 /* Two register misc. */
6525 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6526 size
= (insn
>> 18) & 3;
6527 /* UNDEF for unknown op values and bad op-size combinations */
6528 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6531 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6532 q
&& ((rm
| rd
) & 1)) {
6536 case NEON_2RM_VREV64
:
6537 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6538 tmp
= neon_load_reg(rm
, pass
* 2);
6539 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6541 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6542 case 1: gen_swap_half(tmp
); break;
6543 case 2: /* no-op */ break;
6546 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6548 neon_store_reg(rd
, pass
* 2, tmp2
);
6551 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6552 case 1: gen_swap_half(tmp2
); break;
6555 neon_store_reg(rd
, pass
* 2, tmp2
);
6559 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6560 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6561 for (pass
= 0; pass
< q
+ 1; pass
++) {
6562 tmp
= neon_load_reg(rm
, pass
* 2);
6563 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6564 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6565 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6567 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6568 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6569 case 2: tcg_gen_add_i64(CPU_V001
); break;
6572 if (op
>= NEON_2RM_VPADAL
) {
6574 neon_load_reg64(cpu_V1
, rd
+ pass
);
6575 gen_neon_addl(size
);
6577 neon_store_reg64(cpu_V0
, rd
+ pass
);
6583 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6584 tmp
= neon_load_reg(rm
, n
);
6585 tmp2
= neon_load_reg(rd
, n
+ 1);
6586 neon_store_reg(rm
, n
, tmp2
);
6587 neon_store_reg(rd
, n
+ 1, tmp
);
6594 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6599 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6603 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6604 /* also VQMOVUN; op field and mnemonics don't line up */
6608 TCGV_UNUSED_I32(tmp2
);
6609 for (pass
= 0; pass
< 2; pass
++) {
6610 neon_load_reg64(cpu_V0
, rm
+ pass
);
6611 tmp
= tcg_temp_new_i32();
6612 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6617 neon_store_reg(rd
, 0, tmp2
);
6618 neon_store_reg(rd
, 1, tmp
);
6622 case NEON_2RM_VSHLL
:
6623 if (q
|| (rd
& 1)) {
6626 tmp
= neon_load_reg(rm
, 0);
6627 tmp2
= neon_load_reg(rm
, 1);
6628 for (pass
= 0; pass
< 2; pass
++) {
6631 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6632 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6633 neon_store_reg64(cpu_V0
, rd
+ pass
);
6636 case NEON_2RM_VCVT_F16_F32
:
6637 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
6641 tmp
= tcg_temp_new_i32();
6642 tmp2
= tcg_temp_new_i32();
6643 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
6644 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6645 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
6646 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6647 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6648 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6649 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
6650 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6651 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
6652 neon_store_reg(rd
, 0, tmp2
);
6653 tmp2
= tcg_temp_new_i32();
6654 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6655 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6656 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6657 neon_store_reg(rd
, 1, tmp2
);
6658 tcg_temp_free_i32(tmp
);
6660 case NEON_2RM_VCVT_F32_F16
:
6661 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
6665 tmp3
= tcg_temp_new_i32();
6666 tmp
= neon_load_reg(rm
, 0);
6667 tmp2
= neon_load_reg(rm
, 1);
6668 tcg_gen_ext16u_i32(tmp3
, tmp
);
6669 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6670 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
6671 tcg_gen_shri_i32(tmp3
, tmp
, 16);
6672 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6673 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
6674 tcg_temp_free_i32(tmp
);
6675 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6676 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6677 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
6678 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
6679 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6680 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
6681 tcg_temp_free_i32(tmp2
);
6682 tcg_temp_free_i32(tmp3
);
6684 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6685 if (!arm_dc_feature(s
, ARM_FEATURE_V8_AES
)
6686 || ((rm
| rd
) & 1)) {
6689 tmp
= tcg_const_i32(rd
);
6690 tmp2
= tcg_const_i32(rm
);
6692 /* Bit 6 is the lowest opcode bit; it distinguishes between
6693 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6695 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6697 if (op
== NEON_2RM_AESE
) {
6698 gen_helper_crypto_aese(cpu_env
, tmp
, tmp2
, tmp3
);
6700 gen_helper_crypto_aesmc(cpu_env
, tmp
, tmp2
, tmp3
);
6702 tcg_temp_free_i32(tmp
);
6703 tcg_temp_free_i32(tmp2
);
6704 tcg_temp_free_i32(tmp3
);
6706 case NEON_2RM_SHA1H
:
6707 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)
6708 || ((rm
| rd
) & 1)) {
6711 tmp
= tcg_const_i32(rd
);
6712 tmp2
= tcg_const_i32(rm
);
6714 gen_helper_crypto_sha1h(cpu_env
, tmp
, tmp2
);
6716 tcg_temp_free_i32(tmp
);
6717 tcg_temp_free_i32(tmp2
);
6719 case NEON_2RM_SHA1SU1
:
6720 if ((rm
| rd
) & 1) {
6723 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6725 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
)) {
6728 } else if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
6731 tmp
= tcg_const_i32(rd
);
6732 tmp2
= tcg_const_i32(rm
);
6734 gen_helper_crypto_sha256su0(cpu_env
, tmp
, tmp2
);
6736 gen_helper_crypto_sha1su1(cpu_env
, tmp
, tmp2
);
6738 tcg_temp_free_i32(tmp
);
6739 tcg_temp_free_i32(tmp2
);
6743 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6744 if (neon_2rm_is_float_op(op
)) {
6745 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
6746 neon_reg_offset(rm
, pass
));
6747 TCGV_UNUSED_I32(tmp
);
6749 tmp
= neon_load_reg(rm
, pass
);
6752 case NEON_2RM_VREV32
:
6754 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6755 case 1: gen_swap_half(tmp
); break;
6759 case NEON_2RM_VREV16
:
6764 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6765 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6766 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6772 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6773 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6774 case 2: gen_helper_clz(tmp
, tmp
); break;
6779 gen_helper_neon_cnt_u8(tmp
, tmp
);
6782 tcg_gen_not_i32(tmp
, tmp
);
6784 case NEON_2RM_VQABS
:
6787 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6790 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6793 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6798 case NEON_2RM_VQNEG
:
6801 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6804 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6807 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6812 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6813 tmp2
= tcg_const_i32(0);
6815 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6816 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6817 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6820 tcg_temp_free_i32(tmp2
);
6821 if (op
== NEON_2RM_VCLE0
) {
6822 tcg_gen_not_i32(tmp
, tmp
);
6825 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6826 tmp2
= tcg_const_i32(0);
6828 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6829 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6830 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6833 tcg_temp_free_i32(tmp2
);
6834 if (op
== NEON_2RM_VCLT0
) {
6835 tcg_gen_not_i32(tmp
, tmp
);
6838 case NEON_2RM_VCEQ0
:
6839 tmp2
= tcg_const_i32(0);
6841 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6842 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6843 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6846 tcg_temp_free_i32(tmp2
);
6850 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
6851 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
6852 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
6857 tmp2
= tcg_const_i32(0);
6858 gen_neon_rsb(size
, tmp
, tmp2
);
6859 tcg_temp_free_i32(tmp2
);
6861 case NEON_2RM_VCGT0_F
:
6863 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6864 tmp2
= tcg_const_i32(0);
6865 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6866 tcg_temp_free_i32(tmp2
);
6867 tcg_temp_free_ptr(fpstatus
);
6870 case NEON_2RM_VCGE0_F
:
6872 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6873 tmp2
= tcg_const_i32(0);
6874 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6875 tcg_temp_free_i32(tmp2
);
6876 tcg_temp_free_ptr(fpstatus
);
6879 case NEON_2RM_VCEQ0_F
:
6881 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6882 tmp2
= tcg_const_i32(0);
6883 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6884 tcg_temp_free_i32(tmp2
);
6885 tcg_temp_free_ptr(fpstatus
);
6888 case NEON_2RM_VCLE0_F
:
6890 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6891 tmp2
= tcg_const_i32(0);
6892 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6893 tcg_temp_free_i32(tmp2
);
6894 tcg_temp_free_ptr(fpstatus
);
6897 case NEON_2RM_VCLT0_F
:
6899 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6900 tmp2
= tcg_const_i32(0);
6901 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6902 tcg_temp_free_i32(tmp2
);
6903 tcg_temp_free_ptr(fpstatus
);
6906 case NEON_2RM_VABS_F
:
6909 case NEON_2RM_VNEG_F
:
6913 tmp2
= neon_load_reg(rd
, pass
);
6914 neon_store_reg(rm
, pass
, tmp2
);
6917 tmp2
= neon_load_reg(rd
, pass
);
6919 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6920 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6923 neon_store_reg(rm
, pass
, tmp2
);
6925 case NEON_2RM_VRINTN
:
6926 case NEON_2RM_VRINTA
:
6927 case NEON_2RM_VRINTM
:
6928 case NEON_2RM_VRINTP
:
6929 case NEON_2RM_VRINTZ
:
6932 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6935 if (op
== NEON_2RM_VRINTZ
) {
6936 rmode
= FPROUNDING_ZERO
;
6938 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
6941 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6942 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6944 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
6945 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6947 tcg_temp_free_ptr(fpstatus
);
6948 tcg_temp_free_i32(tcg_rmode
);
6951 case NEON_2RM_VRINTX
:
6953 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6954 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
6955 tcg_temp_free_ptr(fpstatus
);
6958 case NEON_2RM_VCVTAU
:
6959 case NEON_2RM_VCVTAS
:
6960 case NEON_2RM_VCVTNU
:
6961 case NEON_2RM_VCVTNS
:
6962 case NEON_2RM_VCVTPU
:
6963 case NEON_2RM_VCVTPS
:
6964 case NEON_2RM_VCVTMU
:
6965 case NEON_2RM_VCVTMS
:
6967 bool is_signed
= !extract32(insn
, 7, 1);
6968 TCGv_ptr fpst
= get_fpstatus_ptr(1);
6969 TCGv_i32 tcg_rmode
, tcg_shift
;
6970 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
6972 tcg_shift
= tcg_const_i32(0);
6973 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6974 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6978 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
6981 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
6985 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6987 tcg_temp_free_i32(tcg_rmode
);
6988 tcg_temp_free_i32(tcg_shift
);
6989 tcg_temp_free_ptr(fpst
);
6992 case NEON_2RM_VRECPE
:
6994 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6995 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
6996 tcg_temp_free_ptr(fpstatus
);
6999 case NEON_2RM_VRSQRTE
:
7001 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7002 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
7003 tcg_temp_free_ptr(fpstatus
);
7006 case NEON_2RM_VRECPE_F
:
7008 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7009 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7010 tcg_temp_free_ptr(fpstatus
);
7013 case NEON_2RM_VRSQRTE_F
:
7015 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7016 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7017 tcg_temp_free_ptr(fpstatus
);
7020 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
7023 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
7026 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
7027 gen_vfp_tosiz(0, 1);
7029 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
7030 gen_vfp_touiz(0, 1);
7033 /* Reserved op values were caught by the
7034 * neon_2rm_sizes[] check earlier.
7038 if (neon_2rm_is_float_op(op
)) {
7039 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
7040 neon_reg_offset(rd
, pass
));
7042 neon_store_reg(rd
, pass
, tmp
);
7047 } else if ((insn
& (1 << 10)) == 0) {
7049 int n
= ((insn
>> 8) & 3) + 1;
7050 if ((rn
+ n
) > 32) {
7051 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7052 * helper function running off the end of the register file.
7057 if (insn
& (1 << 6)) {
7058 tmp
= neon_load_reg(rd
, 0);
7060 tmp
= tcg_temp_new_i32();
7061 tcg_gen_movi_i32(tmp
, 0);
7063 tmp2
= neon_load_reg(rm
, 0);
7064 tmp4
= tcg_const_i32(rn
);
7065 tmp5
= tcg_const_i32(n
);
7066 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
7067 tcg_temp_free_i32(tmp
);
7068 if (insn
& (1 << 6)) {
7069 tmp
= neon_load_reg(rd
, 1);
7071 tmp
= tcg_temp_new_i32();
7072 tcg_gen_movi_i32(tmp
, 0);
7074 tmp3
= neon_load_reg(rm
, 1);
7075 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
7076 tcg_temp_free_i32(tmp5
);
7077 tcg_temp_free_i32(tmp4
);
7078 neon_store_reg(rd
, 0, tmp2
);
7079 neon_store_reg(rd
, 1, tmp3
);
7080 tcg_temp_free_i32(tmp
);
7081 } else if ((insn
& 0x380) == 0) {
7083 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
7086 if (insn
& (1 << 19)) {
7087 tmp
= neon_load_reg(rm
, 1);
7089 tmp
= neon_load_reg(rm
, 0);
7091 if (insn
& (1 << 16)) {
7092 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
7093 } else if (insn
& (1 << 17)) {
7094 if ((insn
>> 18) & 1)
7095 gen_neon_dup_high16(tmp
);
7097 gen_neon_dup_low16(tmp
);
7099 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7100 tmp2
= tcg_temp_new_i32();
7101 tcg_gen_mov_i32(tmp2
, tmp
);
7102 neon_store_reg(rd
, pass
, tmp2
);
7104 tcg_temp_free_i32(tmp
);
7113 static int disas_coproc_insn(DisasContext
*s
, uint32_t insn
)
7115 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
7116 const ARMCPRegInfo
*ri
;
7118 cpnum
= (insn
>> 8) & 0xf;
7120 /* First check for coprocessor space used for XScale/iwMMXt insns */
7121 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && (cpnum
< 2)) {
7122 if (extract32(s
->c15_cpar
, cpnum
, 1) == 0) {
7125 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7126 return disas_iwmmxt_insn(s
, insn
);
7127 } else if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
7128 return disas_dsp_insn(s
, insn
);
7133 /* Otherwise treat as a generic register access */
7134 is64
= (insn
& (1 << 25)) == 0;
7135 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
7143 opc1
= (insn
>> 4) & 0xf;
7145 rt2
= (insn
>> 16) & 0xf;
7147 crn
= (insn
>> 16) & 0xf;
7148 opc1
= (insn
>> 21) & 7;
7149 opc2
= (insn
>> 5) & 7;
7152 isread
= (insn
>> 20) & 1;
7153 rt
= (insn
>> 12) & 0xf;
7155 ri
= get_arm_cp_reginfo(s
->cp_regs
,
7156 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
7158 /* Check access permissions */
7159 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7164 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7165 /* Emit code to perform further access permissions checks at
7166 * runtime; this may result in an exception.
7167 * Note that on XScale all cp0..c13 registers do an access check
7168 * call in order to handle c15_cpar.
7174 /* Note that since we are an implementation which takes an
7175 * exception on a trapped conditional instruction only if the
7176 * instruction passes its condition code check, we can take
7177 * advantage of the clause in the ARM ARM that allows us to set
7178 * the COND field in the instruction to 0xE in all cases.
7179 * We could fish the actual condition out of the insn (ARM)
7180 * or the condexec bits (Thumb) but it isn't necessary.
7185 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7188 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7189 rt
, isread
, s
->thumb
);
7194 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7197 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7198 rt
, isread
, s
->thumb
);
7202 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7203 * so this can only happen if this is an ARMv7 or earlier CPU,
7204 * in which case the syndrome information won't actually be
7207 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7208 syndrome
= syn_uncategorized();
7212 gen_set_condexec(s
);
7213 gen_set_pc_im(s
, s
->pc
- 4);
7214 tmpptr
= tcg_const_ptr(ri
);
7215 tcg_syn
= tcg_const_i32(syndrome
);
7216 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
);
7217 tcg_temp_free_ptr(tmpptr
);
7218 tcg_temp_free_i32(tcg_syn
);
7221 /* Handle special cases first */
7222 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7229 gen_set_pc_im(s
, s
->pc
);
7230 s
->is_jmp
= DISAS_WFI
;
7236 if ((s
->tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7245 if (ri
->type
& ARM_CP_CONST
) {
7246 tmp64
= tcg_const_i64(ri
->resetvalue
);
7247 } else if (ri
->readfn
) {
7249 tmp64
= tcg_temp_new_i64();
7250 tmpptr
= tcg_const_ptr(ri
);
7251 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7252 tcg_temp_free_ptr(tmpptr
);
7254 tmp64
= tcg_temp_new_i64();
7255 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7257 tmp
= tcg_temp_new_i32();
7258 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7259 store_reg(s
, rt
, tmp
);
7260 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7261 tmp
= tcg_temp_new_i32();
7262 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7263 tcg_temp_free_i64(tmp64
);
7264 store_reg(s
, rt2
, tmp
);
7267 if (ri
->type
& ARM_CP_CONST
) {
7268 tmp
= tcg_const_i32(ri
->resetvalue
);
7269 } else if (ri
->readfn
) {
7271 tmp
= tcg_temp_new_i32();
7272 tmpptr
= tcg_const_ptr(ri
);
7273 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7274 tcg_temp_free_ptr(tmpptr
);
7276 tmp
= load_cpu_offset(ri
->fieldoffset
);
7279 /* Destination register of r15 for 32 bit loads sets
7280 * the condition codes from the high 4 bits of the value
7283 tcg_temp_free_i32(tmp
);
7285 store_reg(s
, rt
, tmp
);
7290 if (ri
->type
& ARM_CP_CONST
) {
7291 /* If not forbidden by access permissions, treat as WI */
7296 TCGv_i32 tmplo
, tmphi
;
7297 TCGv_i64 tmp64
= tcg_temp_new_i64();
7298 tmplo
= load_reg(s
, rt
);
7299 tmphi
= load_reg(s
, rt2
);
7300 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7301 tcg_temp_free_i32(tmplo
);
7302 tcg_temp_free_i32(tmphi
);
7304 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7305 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7306 tcg_temp_free_ptr(tmpptr
);
7308 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7310 tcg_temp_free_i64(tmp64
);
7315 tmp
= load_reg(s
, rt
);
7316 tmpptr
= tcg_const_ptr(ri
);
7317 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
7318 tcg_temp_free_ptr(tmpptr
);
7319 tcg_temp_free_i32(tmp
);
7321 TCGv_i32 tmp
= load_reg(s
, rt
);
7322 store_cpu_offset(tmp
, ri
->fieldoffset
);
7327 if ((s
->tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7328 /* I/O operations must end the TB here (whether read or write) */
7331 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
7332 /* We default to ending the TB on a coprocessor register write,
7333 * but allow this to be suppressed by the register definition
7334 * (usually only necessary to work around guest bugs).
7342 /* Unknown register; this might be a guest error or a QEMU
7343 * unimplemented feature.
7346 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7347 "64 bit system register cp:%d opc1: %d crm:%d "
7349 isread
? "read" : "write", cpnum
, opc1
, crm
,
7350 s
->ns
? "non-secure" : "secure");
7352 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7353 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7355 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
7356 s
->ns
? "non-secure" : "secure");
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
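/* Illustrative only (not generated code): the guest sequence these helpers
 * model is the usual exclusive retry loop, e.g.
 *
 *   retry:  ldrex r1, [r0]
 *           add   r1, r1, #1
 *           strex r2, r1, [r0]
 *           cmp   r2, #0
 *           bne   retry
 *
 * gen_load_exclusive() records the address and the loaded value; the store
 * side below only reports success (status 0) if both still match.
 */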
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16ua(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32ua(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr])
     * then store the new value and set {Rd} = 0, else set {Rd} = 1.
     */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
/*
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
7573 static void gen_srs(DisasContext
*s
,
7574 uint32_t mode
, uint32_t amode
, bool writeback
)
7577 TCGv_i32 addr
= tcg_temp_new_i32();
7578 TCGv_i32 tmp
= tcg_const_i32(mode
);
7579 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7580 tcg_temp_free_i32(tmp
);
7597 tcg_gen_addi_i32(addr
, addr
, offset
);
7598 tmp
= load_reg(s
, 14);
7599 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
7600 tcg_temp_free_i32(tmp
);
7601 tmp
= load_cpu_field(spsr
);
7602 tcg_gen_addi_i32(addr
, addr
, 4);
7603 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
7604 tcg_temp_free_i32(tmp
);
7622 tcg_gen_addi_i32(addr
, addr
, offset
);
7623 tmp
= tcg_const_i32(mode
);
7624 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7625 tcg_temp_free_i32(tmp
);
7627 tcg_temp_free_i32(addr
);
7630 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
7632 unsigned int cond
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
7639 /* M variants do not implement ARM mode. */
7640 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
7645 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7646 * choose to UNDEF. In ARMv5 and above the space is used
7647 * for miscellaneous unconditional instructions.
7651 /* Unconditional instructions. */
7652 if (((insn
>> 25) & 7) == 1) {
7653 /* NEON Data processing. */
7654 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7658 if (disas_neon_data_insn(s
, insn
)) {
7663 if ((insn
& 0x0f100000) == 0x04000000) {
7664 /* NEON load/store. */
7665 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7669 if (disas_neon_ls_insn(s
, insn
)) {
7674 if ((insn
& 0x0f000e10) == 0x0e000a00) {
7676 if (disas_vfp_insn(s
, insn
)) {
7681 if (((insn
& 0x0f30f000) == 0x0510f000) ||
7682 ((insn
& 0x0f30f010) == 0x0710f000)) {
7683 if ((insn
& (1 << 22)) == 0) {
7685 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7689 /* Otherwise PLD; v5TE+ */
7693 if (((insn
& 0x0f70f000) == 0x0450f000) ||
7694 ((insn
& 0x0f70f010) == 0x0650f000)) {
7696 return; /* PLI; V7 */
7698 if (((insn
& 0x0f700000) == 0x04100000) ||
7699 ((insn
& 0x0f700010) == 0x06100000)) {
7700 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7703 return; /* v7MP: Unallocated memory hint: must NOP */
    if ((insn & 0x0ffffdff) == 0x01010000) {
        if (((insn >> 9) & 1) != s->bswap_code) {
            /* Dynamic endianness switching not implemented. */
            qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
    } else if ((insn & 0x0fffff00) == 0x057ff000) {
        switch ((insn >> 4) & 0xf) {
            /* We don't emulate caches so these are a no-op. */
            /* We need to break the TB after this insn to execute
             * self-modifying code correctly and also to take
             * any pending interrupts immediately.
             */
    } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
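        /* SRS: bits [4:0] give the target mode, bits [24:23] the addressing
         * mode (as P,U) and bit 21 the writeback flag.
         */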
        gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
    } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
        rn = (insn >> 16) & 0xf;
        addr = load_reg(s, rn);
        i = (insn >> 23) & 3;
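        /* RFE: the two bits decoded into 'i' give the DA/IA/DB/IB addressing
         * mode, applied as an initial adjustment of the base address below.
         */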
        switch (i) {
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        }
        tcg_gen_addi_i32(addr, addr, offset);
        /* Load PC into tmp and CPSR into tmp2. */
        tmp = tcg_temp_new_i32();
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_addi_i32(addr, addr, 4);
        tmp2 = tcg_temp_new_i32();
        gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
        if (insn & (1 << 21)) {
            /* Base writeback. */
            switch (i) {
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            }
            tcg_gen_addi_i32(addr, addr, offset);
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        gen_rfe(s, tmp, tmp2);
    } else if ((insn & 0x0e000000) == 0x0a000000) {
        /* branch link and change to thumb (blx <offset>) */
        val = (uint32_t)s->pc;
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 14, tmp);
        /* Sign-extend the 24-bit offset */
        offset = (((int32_t)insn) << 8) >> 8;
        /* offset * 4 + bit24 * 2 + (thumb bit) */
        val += (offset << 2) | ((insn >> 23) & 2) | 1;
        /* pipeline offset */
        /* protected by ARCH(5); above, near the start of uncond block */
    } else if ((insn & 0x0e000f00) == 0x0c000100) {
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            /* iWMMXt register transfer. */
            if (extract32(s->c15_cpar, 1, 1)) {
                if (!disas_iwmmxt_insn(s, insn)) {
    } else if ((insn & 0x0fe00000) == 0x0c400000) {
        /* Coprocessor double register transfer. */
    } else if ((insn & 0x0f000010) == 0x0e000010) {
        /* Additional coprocessor register transfer. */
    } else if ((insn & 0x0ff10020) == 0x01000000) {
        /* cps (privileged) */
        if (insn & (1 << 19)) {
            if (insn & (1 << 8))
            if (insn & (1 << 7))
            if (insn & (1 << 6))
            if (insn & (1 << 18))
        if (insn & (1 << 17)) {
            val |= (insn & 0x1f);
        gen_set_psr_im(s, mask, 0, val);
    /* if not always execute, we generate a conditional jump to
       the next instruction */
    s->condlabel = gen_new_label();
    arm_gen_test_cc(cond ^ 1, s->condlabel);
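    /* arm_gen_test_cc() branches to condlabel when the inverted condition
     * holds, so everything generated below is skipped at run time if the
     * insn's condition fails.
     */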
7851 if ((insn
& 0x0f900000) == 0x03000000) {
7852 if ((insn
& (1 << 21)) == 0) {
7854 rd
= (insn
>> 12) & 0xf;
7855 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
7856 if ((insn
& (1 << 22)) == 0) {
7858 tmp
= tcg_temp_new_i32();
7859 tcg_gen_movi_i32(tmp
, val
);
7862 tmp
= load_reg(s
, rd
);
7863 tcg_gen_ext16u_i32(tmp
, tmp
);
7864 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
7866 store_reg(s
, rd
, tmp
);
7868 if (((insn
>> 12) & 0xf) != 0xf)
7870 if (((insn
>> 16) & 0xf) == 0) {
7871 gen_nop_hint(s
, insn
& 0xff);
7873 /* CPSR = immediate */
7875 shift
= ((insn
>> 8) & 0xf) * 2;
7877 val
= (val
>> shift
) | (val
<< (32 - shift
));
7878 i
= ((insn
& (1 << 22)) != 0);
7879 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
7885 } else if ((insn
& 0x0f900000) == 0x01000000
7886 && (insn
& 0x00000090) != 0x00000090) {
7887 /* miscellaneous instructions */
7888 op1
= (insn
>> 21) & 3;
7889 sh
= (insn
>> 4) & 0xf;
7892 case 0x0: /* move program status register */
7895 tmp
= load_reg(s
, rm
);
7896 i
= ((op1
& 2) != 0);
7897 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
7901 rd
= (insn
>> 12) & 0xf;
7905 tmp
= load_cpu_field(spsr
);
7907 tmp
= tcg_temp_new_i32();
7908 gen_helper_cpsr_read(tmp
, cpu_env
);
7910 store_reg(s
, rd
, tmp
);
7915 /* branch/exchange thumb (bx). */
7917 tmp
= load_reg(s
, rm
);
7919 } else if (op1
== 3) {
7922 rd
= (insn
>> 12) & 0xf;
7923 tmp
= load_reg(s
, rm
);
7924 gen_helper_clz(tmp
, tmp
);
7925 store_reg(s
, rd
, tmp
);
7933 /* Trivial implementation equivalent to bx. */
7934 tmp
= load_reg(s
, rm
);
7945 /* branch link/exchange thumb (blx) */
7946 tmp
= load_reg(s
, rm
);
7947 tmp2
= tcg_temp_new_i32();
7948 tcg_gen_movi_i32(tmp2
, s
->pc
);
7949 store_reg(s
, 14, tmp2
);
            uint32_t c = extract32(insn, 8, 4);

            /* Check this CPU supports ARMv8 CRC instructions.
             * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
             * Bits 8, 10 and 11 should be zero.
             */
            if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
            rn = extract32(insn, 16, 4);
            rd = extract32(insn, 12, 4);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if (op1 == 0) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xff);
            } else if (op1 == 1) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
            }
            tmp3 = tcg_const_i32(1 << op1);
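            /* tmp3 passes the operand size in bytes (1 << op1) to the helper;
             * the 'c' field decoded above selects CRC32C rather than CRC32.
             */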
            if (c & 0x2) {
                gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
            } else {
                gen_helper_crc32(tmp, tmp, tmp2, tmp3);
            }
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp3);
            store_reg(s, rd, tmp);
7987 case 0x5: /* saturating add/subtract */
7989 rd
= (insn
>> 12) & 0xf;
7990 rn
= (insn
>> 16) & 0xf;
7991 tmp
= load_reg(s
, rm
);
7992 tmp2
= load_reg(s
, rn
);
7994 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
7996 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
7998 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
7999 tcg_temp_free_i32(tmp2
);
8000 store_reg(s
, rd
, tmp
);
8004 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
8009 gen_exception_insn(s
, 4, EXCP_BKPT
,
8010 syn_aa32_bkpt(imm16
, false),
8011 default_exception_el(s
));
8014 /* Hypervisor call (v7) */
8022 /* Secure monitor call (v6+) */
8034 case 0x8: /* signed multiply */
8039 rs
= (insn
>> 8) & 0xf;
8040 rn
= (insn
>> 12) & 0xf;
8041 rd
= (insn
>> 16) & 0xf;
8043 /* (32 * 16) >> 16 */
8044 tmp
= load_reg(s
, rm
);
8045 tmp2
= load_reg(s
, rs
);
8047 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8050 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8051 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8052 tmp
= tcg_temp_new_i32();
8053 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8054 tcg_temp_free_i64(tmp64
);
8055 if ((sh
& 2) == 0) {
8056 tmp2
= load_reg(s
, rn
);
8057 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8058 tcg_temp_free_i32(tmp2
);
8060 store_reg(s
, rd
, tmp
);
8063 tmp
= load_reg(s
, rm
);
8064 tmp2
= load_reg(s
, rs
);
8065 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
8066 tcg_temp_free_i32(tmp2
);
8068 tmp64
= tcg_temp_new_i64();
8069 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8070 tcg_temp_free_i32(tmp
);
8071 gen_addq(s
, tmp64
, rn
, rd
);
8072 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8073 tcg_temp_free_i64(tmp64
);
8076 tmp2
= load_reg(s
, rn
);
8077 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8078 tcg_temp_free_i32(tmp2
);
8080 store_reg(s
, rd
, tmp
);
8087 } else if (((insn
& 0x0e000000) == 0 &&
8088 (insn
& 0x00000090) != 0x90) ||
8089 ((insn
& 0x0e000000) == (1 << 25))) {
8090 int set_cc
, logic_cc
, shiftop
;
8092 op1
= (insn
>> 21) & 0xf;
8093 set_cc
= (insn
>> 20) & 1;
8094 logic_cc
= table_logic_cc
[op1
] & set_cc
;
8096 /* data processing instruction */
8097 if (insn
& (1 << 25)) {
8098 /* immediate operand */
8100 shift
= ((insn
>> 8) & 0xf) * 2;
8102 val
= (val
>> shift
) | (val
<< (32 - shift
));
8104 tmp2
= tcg_temp_new_i32();
8105 tcg_gen_movi_i32(tmp2
, val
);
8106 if (logic_cc
&& shift
) {
8107 gen_set_CF_bit31(tmp2
);
8112 tmp2
= load_reg(s
, rm
);
8113 shiftop
= (insn
>> 5) & 3;
8114 if (!(insn
& (1 << 4))) {
8115 shift
= (insn
>> 7) & 0x1f;
8116 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8118 rs
= (insn
>> 8) & 0xf;
8119 tmp
= load_reg(s
, rs
);
8120 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
8123 if (op1
!= 0x0f && op1
!= 0x0d) {
8124 rn
= (insn
>> 16) & 0xf;
8125 tmp
= load_reg(s
, rn
);
8127 TCGV_UNUSED_I32(tmp
);
8129 rd
= (insn
>> 12) & 0xf;
8132 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8136 store_reg_bx(s
, rd
, tmp
);
8139 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8143 store_reg_bx(s
, rd
, tmp
);
8146 if (set_cc
&& rd
== 15) {
8147 /* SUBS r15, ... is used for exception return. */
8151 gen_sub_CC(tmp
, tmp
, tmp2
);
8152 gen_exception_return(s
, tmp
);
8155 gen_sub_CC(tmp
, tmp
, tmp2
);
8157 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8159 store_reg_bx(s
, rd
, tmp
);
8164 gen_sub_CC(tmp
, tmp2
, tmp
);
8166 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8168 store_reg_bx(s
, rd
, tmp
);
8172 gen_add_CC(tmp
, tmp
, tmp2
);
8174 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8176 store_reg_bx(s
, rd
, tmp
);
8180 gen_adc_CC(tmp
, tmp
, tmp2
);
8182 gen_add_carry(tmp
, tmp
, tmp2
);
8184 store_reg_bx(s
, rd
, tmp
);
8188 gen_sbc_CC(tmp
, tmp
, tmp2
);
8190 gen_sub_carry(tmp
, tmp
, tmp2
);
8192 store_reg_bx(s
, rd
, tmp
);
8196 gen_sbc_CC(tmp
, tmp2
, tmp
);
8198 gen_sub_carry(tmp
, tmp2
, tmp
);
8200 store_reg_bx(s
, rd
, tmp
);
8204 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8207 tcg_temp_free_i32(tmp
);
8211 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8214 tcg_temp_free_i32(tmp
);
8218 gen_sub_CC(tmp
, tmp
, tmp2
);
8220 tcg_temp_free_i32(tmp
);
8224 gen_add_CC(tmp
, tmp
, tmp2
);
8226 tcg_temp_free_i32(tmp
);
8229 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8233 store_reg_bx(s
, rd
, tmp
);
8236 if (logic_cc
&& rd
== 15) {
8237 /* MOVS r15, ... is used for exception return. */
8241 gen_exception_return(s
, tmp2
);
8246 store_reg_bx(s
, rd
, tmp2
);
8250 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8254 store_reg_bx(s
, rd
, tmp
);
8258 tcg_gen_not_i32(tmp2
, tmp2
);
8262 store_reg_bx(s
, rd
, tmp2
);
8265 if (op1
!= 0x0f && op1
!= 0x0d) {
8266 tcg_temp_free_i32(tmp2
);
8269 /* other instructions */
8270 op1
= (insn
>> 24) & 0xf;
8274 /* multiplies, extra load/stores */
8275 sh
= (insn
>> 5) & 3;
8278 rd
= (insn
>> 16) & 0xf;
8279 rn
= (insn
>> 12) & 0xf;
8280 rs
= (insn
>> 8) & 0xf;
8282 op1
= (insn
>> 20) & 0xf;
8284 case 0: case 1: case 2: case 3: case 6:
8286 tmp
= load_reg(s
, rs
);
8287 tmp2
= load_reg(s
, rm
);
8288 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8289 tcg_temp_free_i32(tmp2
);
8290 if (insn
& (1 << 22)) {
8291 /* Subtract (mls) */
8293 tmp2
= load_reg(s
, rn
);
8294 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8295 tcg_temp_free_i32(tmp2
);
8296 } else if (insn
& (1 << 21)) {
8298 tmp2
= load_reg(s
, rn
);
8299 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8300 tcg_temp_free_i32(tmp2
);
8302 if (insn
& (1 << 20))
8304 store_reg(s
, rd
, tmp
);
8307 /* 64 bit mul double accumulate (UMAAL) */
8309 tmp
= load_reg(s
, rs
);
8310 tmp2
= load_reg(s
, rm
);
8311 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8312 gen_addq_lo(s
, tmp64
, rn
);
8313 gen_addq_lo(s
, tmp64
, rd
);
8314 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8315 tcg_temp_free_i64(tmp64
);
8317 case 8: case 9: case 10: case 11:
8318 case 12: case 13: case 14: case 15:
8319 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8320 tmp
= load_reg(s
, rs
);
8321 tmp2
= load_reg(s
, rm
);
8322 if (insn
& (1 << 22)) {
8323 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
8325 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
8327 if (insn
& (1 << 21)) { /* mult accumulate */
8328 TCGv_i32 al
= load_reg(s
, rn
);
8329 TCGv_i32 ah
= load_reg(s
, rd
);
8330 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
8331 tcg_temp_free_i32(al
);
8332 tcg_temp_free_i32(ah
);
8334 if (insn
& (1 << 20)) {
8335 gen_logicq_cc(tmp
, tmp2
);
8337 store_reg(s
, rn
, tmp
);
8338 store_reg(s
, rd
, tmp2
);
8344 rn
= (insn
>> 16) & 0xf;
8345 rd
= (insn
>> 12) & 0xf;
8346 if (insn
& (1 << 23)) {
8347 /* load/store exclusive */
8348 int op2
= (insn
>> 8) & 3;
8349 op1
= (insn
>> 21) & 0x3;
8352 case 0: /* lda/stl */
8358 case 1: /* reserved */
8360 case 2: /* ldaex/stlex */
8363 case 3: /* ldrex/strex */
8372 addr
= tcg_temp_local_new_i32();
8373 load_reg_var(s
, addr
, rn
);
8375 /* Since the emulation does not have barriers,
8376 the acquire/release semantics need no special
8379 if (insn
& (1 << 20)) {
8380 tmp
= tcg_temp_new_i32();
8383 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8386 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
8389 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
8394 store_reg(s
, rd
, tmp
);
8397 tmp
= load_reg(s
, rm
);
8400 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8403 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
8406 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
8411 tcg_temp_free_i32(tmp
);
8413 } else if (insn
& (1 << 20)) {
8416 gen_load_exclusive(s
, rd
, 15, addr
, 2);
8418 case 1: /* ldrexd */
8419 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
8421 case 2: /* ldrexb */
8422 gen_load_exclusive(s
, rd
, 15, addr
, 0);
8424 case 3: /* ldrexh */
8425 gen_load_exclusive(s
, rd
, 15, addr
, 1);
8434 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
8436 case 1: /* strexd */
8437 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
8439 case 2: /* strexb */
8440 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
8442 case 3: /* strexh */
8443 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
8449 tcg_temp_free_i32(addr
);
                /* SWP instruction */
                /* ??? This is not really atomic.  However we know
                   we never have multiple CPUs running in parallel,
                   so it is good enough. */
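                /* SWP loads the old value at [rn] into rd and stores rm back
                 * to the same address; bit 22 selects the byte variant (SWPB).
                 */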
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                tmp2 = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
                    gen_aa32_st8(tmp, addr, get_mem_index(s));
                } else {
                    gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                }
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                store_reg(s, rd, tmp2);
8474 bool load
= insn
& (1 << 20);
8475 bool doubleword
= false;
8476 /* Misc load/store */
8477 rn
= (insn
>> 16) & 0xf;
8478 rd
= (insn
>> 12) & 0xf;
8480 if (!load
&& (sh
& 2)) {
8484 /* UNPREDICTABLE; we choose to UNDEF */
8487 load
= (sh
& 1) == 0;
8491 addr
= load_reg(s
, rn
);
8492 if (insn
& (1 << 24))
8493 gen_add_datah_offset(s
, insn
, 0, addr
);
8499 tmp
= load_reg(s
, rd
);
8500 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8501 tcg_temp_free_i32(tmp
);
8502 tcg_gen_addi_i32(addr
, addr
, 4);
8503 tmp
= load_reg(s
, rd
+ 1);
8504 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8505 tcg_temp_free_i32(tmp
);
8508 tmp
= tcg_temp_new_i32();
8509 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8510 store_reg(s
, rd
, tmp
);
8511 tcg_gen_addi_i32(addr
, addr
, 4);
8512 tmp
= tcg_temp_new_i32();
8513 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8516 address_offset
= -4;
8519 tmp
= tcg_temp_new_i32();
8522 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
8525 gen_aa32_ld8s(tmp
, addr
, get_mem_index(s
));
8529 gen_aa32_ld16s(tmp
, addr
, get_mem_index(s
));
8534 tmp
= load_reg(s
, rd
);
8535 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
8536 tcg_temp_free_i32(tmp
);
8538 /* Perform base writeback before the loaded value to
8539 ensure correct behavior with overlapping index registers.
8540 ldrd with base writeback is undefined if the
8541 destination and index registers overlap. */
8542 if (!(insn
& (1 << 24))) {
8543 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
8544 store_reg(s
, rn
, addr
);
8545 } else if (insn
& (1 << 21)) {
8547 tcg_gen_addi_i32(addr
, addr
, address_offset
);
8548 store_reg(s
, rn
, addr
);
8550 tcg_temp_free_i32(addr
);
8553 /* Complete the load. */
8554 store_reg(s
, rd
, tmp
);
8563 if (insn
& (1 << 4)) {
8565 /* Armv6 Media instructions. */
8567 rn
= (insn
>> 16) & 0xf;
8568 rd
= (insn
>> 12) & 0xf;
8569 rs
= (insn
>> 8) & 0xf;
8570 switch ((insn
>> 23) & 3) {
8571 case 0: /* Parallel add/subtract. */
8572 op1
= (insn
>> 20) & 7;
8573 tmp
= load_reg(s
, rn
);
8574 tmp2
= load_reg(s
, rm
);
8575 sh
= (insn
>> 5) & 7;
8576 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
8578 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
8579 tcg_temp_free_i32(tmp2
);
8580 store_reg(s
, rd
, tmp
);
8583 if ((insn
& 0x00700020) == 0) {
8584 /* Halfword pack. */
8585 tmp
= load_reg(s
, rn
);
8586 tmp2
= load_reg(s
, rm
);
8587 shift
= (insn
>> 7) & 0x1f;
8588 if (insn
& (1 << 6)) {
8592 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8593 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8594 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8598 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8599 tcg_gen_ext16u_i32(tmp
, tmp
);
8600 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8602 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8603 tcg_temp_free_i32(tmp2
);
8604 store_reg(s
, rd
, tmp
);
8605 } else if ((insn
& 0x00200020) == 0x00200000) {
8607 tmp
= load_reg(s
, rm
);
8608 shift
= (insn
>> 7) & 0x1f;
8609 if (insn
& (1 << 6)) {
8612 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8614 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8616 sh
= (insn
>> 16) & 0x1f;
8617 tmp2
= tcg_const_i32(sh
);
8618 if (insn
& (1 << 22))
8619 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
8621 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
8622 tcg_temp_free_i32(tmp2
);
8623 store_reg(s
, rd
, tmp
);
8624 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
8626 tmp
= load_reg(s
, rm
);
8627 sh
= (insn
>> 16) & 0x1f;
8628 tmp2
= tcg_const_i32(sh
);
8629 if (insn
& (1 << 22))
8630 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
8632 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
8633 tcg_temp_free_i32(tmp2
);
8634 store_reg(s
, rd
, tmp
);
8635 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
8637 tmp
= load_reg(s
, rn
);
8638 tmp2
= load_reg(s
, rm
);
8639 tmp3
= tcg_temp_new_i32();
8640 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8641 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8642 tcg_temp_free_i32(tmp3
);
8643 tcg_temp_free_i32(tmp2
);
8644 store_reg(s
, rd
, tmp
);
8645 } else if ((insn
& 0x000003e0) == 0x00000060) {
8646 tmp
= load_reg(s
, rm
);
8647 shift
= (insn
>> 10) & 3;
8648 /* ??? In many cases it's not necessary to do a
8649 rotate, a shift is sufficient. */
8651 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8652 op1
= (insn
>> 20) & 7;
8654 case 0: gen_sxtb16(tmp
); break;
8655 case 2: gen_sxtb(tmp
); break;
8656 case 3: gen_sxth(tmp
); break;
8657 case 4: gen_uxtb16(tmp
); break;
8658 case 6: gen_uxtb(tmp
); break;
8659 case 7: gen_uxth(tmp
); break;
8660 default: goto illegal_op
;
8663 tmp2
= load_reg(s
, rn
);
8664 if ((op1
& 3) == 0) {
8665 gen_add16(tmp
, tmp2
);
8667 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8668 tcg_temp_free_i32(tmp2
);
8671 store_reg(s
, rd
, tmp
);
8672 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
8674 tmp
= load_reg(s
, rm
);
8675 if (insn
& (1 << 22)) {
8676 if (insn
& (1 << 7)) {
8680 gen_helper_rbit(tmp
, tmp
);
8683 if (insn
& (1 << 7))
8686 tcg_gen_bswap32_i32(tmp
, tmp
);
8688 store_reg(s
, rd
, tmp
);
8693 case 2: /* Multiplies (Type 3). */
8694 switch ((insn
>> 20) & 0x7) {
8696 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
8697 /* op2 not 00x or 11x : UNDEF */
8700 /* Signed multiply most significant [accumulate].
8701 (SMMUL, SMMLA, SMMLS) */
8702 tmp
= load_reg(s
, rm
);
8703 tmp2
= load_reg(s
, rs
);
8704 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8707 tmp
= load_reg(s
, rd
);
8708 if (insn
& (1 << 6)) {
8709 tmp64
= gen_subq_msw(tmp64
, tmp
);
8711 tmp64
= gen_addq_msw(tmp64
, tmp
);
8714 if (insn
& (1 << 5)) {
8715 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8717 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8718 tmp
= tcg_temp_new_i32();
8719 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8720 tcg_temp_free_i64(tmp64
);
8721 store_reg(s
, rn
, tmp
);
8725 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8726 if (insn
& (1 << 7)) {
8729 tmp
= load_reg(s
, rm
);
8730 tmp2
= load_reg(s
, rs
);
8731 if (insn
& (1 << 5))
8732 gen_swap_half(tmp2
);
8733 gen_smul_dual(tmp
, tmp2
);
8734 if (insn
& (1 << 22)) {
8735 /* smlald, smlsld */
8738 tmp64
= tcg_temp_new_i64();
8739 tmp64_2
= tcg_temp_new_i64();
8740 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8741 tcg_gen_ext_i32_i64(tmp64_2
, tmp2
);
8742 tcg_temp_free_i32(tmp
);
8743 tcg_temp_free_i32(tmp2
);
8744 if (insn
& (1 << 6)) {
8745 tcg_gen_sub_i64(tmp64
, tmp64
, tmp64_2
);
8747 tcg_gen_add_i64(tmp64
, tmp64
, tmp64_2
);
8749 tcg_temp_free_i64(tmp64_2
);
8750 gen_addq(s
, tmp64
, rd
, rn
);
8751 gen_storeq_reg(s
, rd
, rn
, tmp64
);
8752 tcg_temp_free_i64(tmp64
);
8754 /* smuad, smusd, smlad, smlsd */
8755 if (insn
& (1 << 6)) {
8756 /* This subtraction cannot overflow. */
8757 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8759 /* This addition cannot overflow 32 bits;
8760 * however it may overflow considered as a
8761 * signed operation, in which case we must set
8764 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8766 tcg_temp_free_i32(tmp2
);
8769 tmp2
= load_reg(s
, rd
);
8770 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8771 tcg_temp_free_i32(tmp2
);
8773 store_reg(s
, rn
, tmp
);
8779 if (!arm_dc_feature(s
, ARM_FEATURE_ARM_DIV
)) {
8782 if (((insn
>> 5) & 7) || (rd
!= 15)) {
8785 tmp
= load_reg(s
, rm
);
8786 tmp2
= load_reg(s
, rs
);
8787 if (insn
& (1 << 21)) {
8788 gen_helper_udiv(tmp
, tmp
, tmp2
);
8790 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8792 tcg_temp_free_i32(tmp2
);
8793 store_reg(s
, rn
, tmp
);
8800 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
8802 case 0: /* Unsigned sum of absolute differences. */
8804 tmp
= load_reg(s
, rm
);
8805 tmp2
= load_reg(s
, rs
);
8806 gen_helper_usad8(tmp
, tmp
, tmp2
);
8807 tcg_temp_free_i32(tmp2
);
8809 tmp2
= load_reg(s
, rd
);
8810 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8811 tcg_temp_free_i32(tmp2
);
8813 store_reg(s
, rn
, tmp
);
8815 case 0x20: case 0x24: case 0x28: case 0x2c:
8816 /* Bitfield insert/clear. */
8818 shift
= (insn
>> 7) & 0x1f;
8819 i
= (insn
>> 16) & 0x1f;
8821 /* UNPREDICTABLE; we choose to UNDEF */
8826 tmp
= tcg_temp_new_i32();
8827 tcg_gen_movi_i32(tmp
, 0);
8829 tmp
= load_reg(s
, rm
);
8832 tmp2
= load_reg(s
, rd
);
8833 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
8834 tcg_temp_free_i32(tmp2
);
8836 store_reg(s
, rd
, tmp
);
8838 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8839 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8841 tmp
= load_reg(s
, rm
);
8842 shift
= (insn
>> 7) & 0x1f;
8843 i
= ((insn
>> 16) & 0x1f) + 1;
8848 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
8850 gen_sbfx(tmp
, shift
, i
);
8853 store_reg(s
, rd
, tmp
);
8863 /* Check for undefined extension instructions
8864 * per the ARM Bible IE:
8865 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8867 sh
= (0xf << 20) | (0xf << 4);
8868 if (op1
== 0x7 && ((insn
& sh
) == sh
))
8872 /* load/store byte/word */
8873 rn
= (insn
>> 16) & 0xf;
8874 rd
= (insn
>> 12) & 0xf;
8875 tmp2
= load_reg(s
, rn
);
8876 if ((insn
& 0x01200000) == 0x00200000) {
8878 i
= get_a32_user_mem_index(s
);
8880 i
= get_mem_index(s
);
8882 if (insn
& (1 << 24))
8883 gen_add_data_offset(s
, insn
, tmp2
);
8884 if (insn
& (1 << 20)) {
8886 tmp
= tcg_temp_new_i32();
8887 if (insn
& (1 << 22)) {
8888 gen_aa32_ld8u(tmp
, tmp2
, i
);
8890 gen_aa32_ld32u(tmp
, tmp2
, i
);
8894 tmp
= load_reg(s
, rd
);
8895 if (insn
& (1 << 22)) {
8896 gen_aa32_st8(tmp
, tmp2
, i
);
8898 gen_aa32_st32(tmp
, tmp2
, i
);
8900 tcg_temp_free_i32(tmp
);
8902 if (!(insn
& (1 << 24))) {
8903 gen_add_data_offset(s
, insn
, tmp2
);
8904 store_reg(s
, rn
, tmp2
);
8905 } else if (insn
& (1 << 21)) {
8906 store_reg(s
, rn
, tmp2
);
8908 tcg_temp_free_i32(tmp2
);
8910 if (insn
& (1 << 20)) {
8911 /* Complete the load. */
8912 store_reg_from_load(s
, rd
, tmp
);
8918 int j
, n
, loaded_base
;
8919 bool exc_return
= false;
8920 bool is_load
= extract32(insn
, 20, 1);
8922 TCGv_i32 loaded_var
;
8923 /* load/store multiple words */
8924 /* XXX: store correct base if write back */
8925 if (insn
& (1 << 22)) {
8926 /* LDM (user), LDM (exception return) and STM (user) */
8928 goto illegal_op
; /* only usable in supervisor mode */
8930 if (is_load
&& extract32(insn
, 15, 1)) {
8936 rn
= (insn
>> 16) & 0xf;
8937 addr
= load_reg(s
, rn
);
8939 /* compute total size */
8941 TCGV_UNUSED_I32(loaded_var
);
8944 if (insn
& (1 << i
))
8947 /* XXX: test invalid n == 0 case ? */
8948 if (insn
& (1 << 23)) {
8949 if (insn
& (1 << 24)) {
8951 tcg_gen_addi_i32(addr
, addr
, 4);
8953 /* post increment */
8956 if (insn
& (1 << 24)) {
8958 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
8960 /* post decrement */
8962 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
8967 if (insn
& (1 << i
)) {
8970 tmp
= tcg_temp_new_i32();
8971 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8973 tmp2
= tcg_const_i32(i
);
8974 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
8975 tcg_temp_free_i32(tmp2
);
8976 tcg_temp_free_i32(tmp
);
8977 } else if (i
== rn
) {
8981 store_reg_from_load(s
, i
, tmp
);
8986 /* special case: r15 = PC + 8 */
8987 val
= (long)s
->pc
+ 4;
8988 tmp
= tcg_temp_new_i32();
8989 tcg_gen_movi_i32(tmp
, val
);
8991 tmp
= tcg_temp_new_i32();
8992 tmp2
= tcg_const_i32(i
);
8993 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
8994 tcg_temp_free_i32(tmp2
);
8996 tmp
= load_reg(s
, i
);
8998 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8999 tcg_temp_free_i32(tmp
);
9002 /* no need to add after the last transfer */
9004 tcg_gen_addi_i32(addr
, addr
, 4);
9007 if (insn
& (1 << 21)) {
9009 if (insn
& (1 << 23)) {
9010 if (insn
& (1 << 24)) {
9013 /* post increment */
9014 tcg_gen_addi_i32(addr
, addr
, 4);
9017 if (insn
& (1 << 24)) {
9020 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9022 /* post decrement */
9023 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9026 store_reg(s
, rn
, addr
);
9028 tcg_temp_free_i32(addr
);
9031 store_reg(s
, rn
, loaded_var
);
9034 /* Restore CPSR from SPSR. */
9035 tmp
= load_cpu_field(spsr
);
9036 gen_set_cpsr(tmp
, CPSR_ERET_MASK
);
9037 tcg_temp_free_i32(tmp
);
9038 s
->is_jmp
= DISAS_JUMP
;
9047 /* branch (and link) */
9048 val
= (int32_t)s
->pc
;
9049 if (insn
& (1 << 24)) {
9050 tmp
= tcg_temp_new_i32();
9051 tcg_gen_movi_i32(tmp
, val
);
9052 store_reg(s
, 14, tmp
);
9054 offset
= sextract32(insn
<< 2, 0, 26);
9062 if (((insn
>> 8) & 0xe) == 10) {
9064 if (disas_vfp_insn(s
, insn
)) {
9067 } else if (disas_coproc_insn(s
, insn
)) {
9074 gen_set_pc_im(s
, s
->pc
);
9075 s
->svc_imm
= extract32(insn
, 0, 24);
9076 s
->is_jmp
= DISAS_SWI
;
9080 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
9081 default_exception_el(s
));
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
9109 tcg_gen_and_i32(t0
, t0
, t1
);
9113 tcg_gen_andc_i32(t0
, t0
, t1
);
9117 tcg_gen_or_i32(t0
, t0
, t1
);
9121 tcg_gen_orc_i32(t0
, t0
, t1
);
9125 tcg_gen_xor_i32(t0
, t0
, t1
);
9130 gen_add_CC(t0
, t0
, t1
);
9132 tcg_gen_add_i32(t0
, t0
, t1
);
9136 gen_adc_CC(t0
, t0
, t1
);
9142 gen_sbc_CC(t0
, t0
, t1
);
9144 gen_sub_carry(t0
, t0
, t1
);
9149 gen_sub_CC(t0
, t0
, t1
);
9151 tcg_gen_sub_i32(t0
, t0
, t1
);
9155 gen_sub_CC(t0
, t1
, t0
);
9157 tcg_gen_sub_i32(t0
, t1
, t0
);
9159 default: /* 5, 6, 7, 9, 12, 15. */
9165 gen_set_CF_bit31(t1
);
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal. */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
          || arm_dc_feature(s, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior. */
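        /* The first halfword has already loaded LR with the upper part of the
         * branch offset; the second halfword, decoded below, adds the low bits
         * from LR and performs the jump.
         */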
9191 if ((insn
& (1 << 12)) == 0) {
9193 /* Second half of blx. */
9194 offset
= ((insn
& 0x7ff) << 1);
9195 tmp
= load_reg(s
, 14);
9196 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9197 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
9199 tmp2
= tcg_temp_new_i32();
9200 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9201 store_reg(s
, 14, tmp2
);
9205 if (insn
& (1 << 11)) {
9206 /* Second half of bl. */
9207 offset
= ((insn
& 0x7ff) << 1) | 1;
9208 tmp
= load_reg(s
, 14);
9209 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9211 tmp2
= tcg_temp_new_i32();
9212 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9213 store_reg(s
, 14, tmp2
);
9217 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
9218 /* Instruction spans a page boundary. Implement it as two
9219 16-bit instructions in case the second half causes an
9221 offset
= ((int32_t)insn
<< 21) >> 9;
9222 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
9225 /* Fall through to 32-bit decode. */
9228 insn
= arm_lduw_code(env
, s
->pc
, s
->bswap_code
);
9230 insn
|= (uint32_t)insn_hw1
<< 16;
9232 if ((insn
& 0xf800e800) != 0xf000e800) {
9236 rn
= (insn
>> 16) & 0xf;
9237 rs
= (insn
>> 12) & 0xf;
9238 rd
= (insn
>> 8) & 0xf;
9240 switch ((insn
>> 25) & 0xf) {
9241 case 0: case 1: case 2: case 3:
9242 /* 16-bit instructions. Should never happen. */
9245 if (insn
& (1 << 22)) {
9246 /* Other load/store, table branch. */
9247 if (insn
& 0x01200000) {
9248 /* Load/store doubleword. */
9250 addr
= tcg_temp_new_i32();
9251 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
9253 addr
= load_reg(s
, rn
);
9255 offset
= (insn
& 0xff) * 4;
9256 if ((insn
& (1 << 23)) == 0)
9258 if (insn
& (1 << 24)) {
9259 tcg_gen_addi_i32(addr
, addr
, offset
);
9262 if (insn
& (1 << 20)) {
9264 tmp
= tcg_temp_new_i32();
9265 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9266 store_reg(s
, rs
, tmp
);
9267 tcg_gen_addi_i32(addr
, addr
, 4);
9268 tmp
= tcg_temp_new_i32();
9269 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9270 store_reg(s
, rd
, tmp
);
9273 tmp
= load_reg(s
, rs
);
9274 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9275 tcg_temp_free_i32(tmp
);
9276 tcg_gen_addi_i32(addr
, addr
, 4);
9277 tmp
= load_reg(s
, rd
);
9278 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9279 tcg_temp_free_i32(tmp
);
9281 if (insn
& (1 << 21)) {
9282 /* Base writeback. */
9285 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
9286 store_reg(s
, rn
, addr
);
9288 tcg_temp_free_i32(addr
);
9290 } else if ((insn
& (1 << 23)) == 0) {
9291 /* Load/store exclusive word. */
9292 addr
= tcg_temp_local_new_i32();
9293 load_reg_var(s
, addr
, rn
);
9294 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
9295 if (insn
& (1 << 20)) {
9296 gen_load_exclusive(s
, rs
, 15, addr
, 2);
9298 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
9300 tcg_temp_free_i32(addr
);
9301 } else if ((insn
& (7 << 5)) == 0) {
9304 addr
= tcg_temp_new_i32();
9305 tcg_gen_movi_i32(addr
, s
->pc
);
9307 addr
= load_reg(s
, rn
);
9309 tmp
= load_reg(s
, rm
);
9310 tcg_gen_add_i32(addr
, addr
, tmp
);
9311 if (insn
& (1 << 4)) {
9313 tcg_gen_add_i32(addr
, addr
, tmp
);
9314 tcg_temp_free_i32(tmp
);
9315 tmp
= tcg_temp_new_i32();
9316 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
9318 tcg_temp_free_i32(tmp
);
9319 tmp
= tcg_temp_new_i32();
9320 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
9322 tcg_temp_free_i32(addr
);
9323 tcg_gen_shli_i32(tmp
, tmp
, 1);
9324 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
9325 store_reg(s
, 15, tmp
);
9327 int op2
= (insn
>> 6) & 0x3;
9328 op
= (insn
>> 4) & 0x3;
9333 /* Load/store exclusive byte/halfword/doubleword */
9340 /* Load-acquire/store-release */
9346 /* Load-acquire/store-release exclusive */
9350 addr
= tcg_temp_local_new_i32();
9351 load_reg_var(s
, addr
, rn
);
9353 if (insn
& (1 << 20)) {
9354 tmp
= tcg_temp_new_i32();
9357 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
9360 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
9363 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9368 store_reg(s
, rs
, tmp
);
9370 tmp
= load_reg(s
, rs
);
9373 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
9376 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
9379 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9384 tcg_temp_free_i32(tmp
);
9386 } else if (insn
& (1 << 20)) {
9387 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
9389 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
9391 tcg_temp_free_i32(addr
);
9394 /* Load/store multiple, RFE, SRS. */
9395 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
9396 /* RFE, SRS: not available in user mode or on M profile */
9397 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
9400 if (insn
& (1 << 20)) {
9402 addr
= load_reg(s
, rn
);
9403 if ((insn
& (1 << 24)) == 0)
9404 tcg_gen_addi_i32(addr
, addr
, -8);
9405 /* Load PC into tmp and CPSR into tmp2. */
9406 tmp
= tcg_temp_new_i32();
9407 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9408 tcg_gen_addi_i32(addr
, addr
, 4);
9409 tmp2
= tcg_temp_new_i32();
9410 gen_aa32_ld32u(tmp2
, addr
, get_mem_index(s
));
9411 if (insn
& (1 << 21)) {
9412 /* Base writeback. */
9413 if (insn
& (1 << 24)) {
9414 tcg_gen_addi_i32(addr
, addr
, 4);
9416 tcg_gen_addi_i32(addr
, addr
, -4);
9418 store_reg(s
, rn
, addr
);
9420 tcg_temp_free_i32(addr
);
9422 gen_rfe(s
, tmp
, tmp2
);
9425 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
9429 int i
, loaded_base
= 0;
9430 TCGv_i32 loaded_var
;
9431 /* Load/store multiple. */
9432 addr
= load_reg(s
, rn
);
9434 for (i
= 0; i
< 16; i
++) {
9435 if (insn
& (1 << i
))
9438 if (insn
& (1 << 24)) {
9439 tcg_gen_addi_i32(addr
, addr
, -offset
);
9442 TCGV_UNUSED_I32(loaded_var
);
9443 for (i
= 0; i
< 16; i
++) {
9444 if ((insn
& (1 << i
)) == 0)
9446 if (insn
& (1 << 20)) {
9448 tmp
= tcg_temp_new_i32();
9449 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9452 } else if (i
== rn
) {
9456 store_reg(s
, i
, tmp
);
9460 tmp
= load_reg(s
, i
);
9461 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9462 tcg_temp_free_i32(tmp
);
9464 tcg_gen_addi_i32(addr
, addr
, 4);
9467 store_reg(s
, rn
, loaded_var
);
9469 if (insn
& (1 << 21)) {
9470 /* Base register writeback. */
9471 if (insn
& (1 << 24)) {
9472 tcg_gen_addi_i32(addr
, addr
, -offset
);
9474 /* Fault if writeback register is in register list. */
9475 if (insn
& (1 << rn
))
9477 store_reg(s
, rn
, addr
);
9479 tcg_temp_free_i32(addr
);
9486 op
= (insn
>> 21) & 0xf;
9488 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9491 /* Halfword pack. */
9492 tmp
= load_reg(s
, rn
);
9493 tmp2
= load_reg(s
, rm
);
9494 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
9495 if (insn
& (1 << 5)) {
9499 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
9500 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
9501 tcg_gen_ext16u_i32(tmp2
, tmp2
);
9505 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
9506 tcg_gen_ext16u_i32(tmp
, tmp
);
9507 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
9509 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9510 tcg_temp_free_i32(tmp2
);
9511 store_reg(s
, rd
, tmp
);
9513 /* Data processing register constant shift. */
9515 tmp
= tcg_temp_new_i32();
9516 tcg_gen_movi_i32(tmp
, 0);
9518 tmp
= load_reg(s
, rn
);
9520 tmp2
= load_reg(s
, rm
);
9522 shiftop
= (insn
>> 4) & 3;
9523 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
9524 conds
= (insn
& (1 << 20)) != 0;
9525 logic_cc
= (conds
&& thumb2_logic_op(op
));
9526 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
9527 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
9529 tcg_temp_free_i32(tmp2
);
9531 store_reg(s
, rd
, tmp
);
9533 tcg_temp_free_i32(tmp
);
9537 case 13: /* Misc data processing. */
9538 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
9539 if (op
< 4 && (insn
& 0xf000) != 0xf000)
9542 case 0: /* Register controlled shift. */
9543 tmp
= load_reg(s
, rn
);
9544 tmp2
= load_reg(s
, rm
);
9545 if ((insn
& 0x70) != 0)
9547 op
= (insn
>> 21) & 3;
9548 logic_cc
= (insn
& (1 << 20)) != 0;
9549 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
9552 store_reg_bx(s
, rd
, tmp
);
9554 case 1: /* Sign/zero extend. */
9555 op
= (insn
>> 20) & 7;
9557 case 0: /* SXTAH, SXTH */
9558 case 1: /* UXTAH, UXTH */
9559 case 4: /* SXTAB, SXTB */
9560 case 5: /* UXTAB, UXTB */
9562 case 2: /* SXTAB16, SXTB16 */
9563 case 3: /* UXTAB16, UXTB16 */
9564 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9572 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9576 tmp
= load_reg(s
, rm
);
9577 shift
= (insn
>> 4) & 3;
9578 /* ??? In many cases it's not necessary to do a
9579 rotate, a shift is sufficient. */
9581 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
9582 op
= (insn
>> 20) & 7;
9584 case 0: gen_sxth(tmp
); break;
9585 case 1: gen_uxth(tmp
); break;
9586 case 2: gen_sxtb16(tmp
); break;
9587 case 3: gen_uxtb16(tmp
); break;
9588 case 4: gen_sxtb(tmp
); break;
9589 case 5: gen_uxtb(tmp
); break;
9591 g_assert_not_reached();
9594 tmp2
= load_reg(s
, rn
);
9595 if ((op
>> 1) == 1) {
9596 gen_add16(tmp
, tmp2
);
9598 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9599 tcg_temp_free_i32(tmp2
);
9602 store_reg(s
, rd
, tmp
);
9604 case 2: /* SIMD add/subtract. */
9605 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9608 op
= (insn
>> 20) & 7;
9609 shift
= (insn
>> 4) & 7;
9610 if ((op
& 3) == 3 || (shift
& 3) == 3)
9612 tmp
= load_reg(s
, rn
);
9613 tmp2
= load_reg(s
, rm
);
9614 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
9615 tcg_temp_free_i32(tmp2
);
9616 store_reg(s
, rd
, tmp
);
9618 case 3: /* Other data processing. */
9619 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
9621 /* Saturating add/subtract. */
9622 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9625 tmp
= load_reg(s
, rn
);
9626 tmp2
= load_reg(s
, rm
);
9628 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
9630 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
9632 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
9633 tcg_temp_free_i32(tmp2
);
9636 case 0x0a: /* rbit */
9637 case 0x08: /* rev */
9638 case 0x09: /* rev16 */
9639 case 0x0b: /* revsh */
9640 case 0x18: /* clz */
9642 case 0x10: /* sel */
9643 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9647 case 0x20: /* crc32/crc32c */
9653 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
)) {
9660 tmp
= load_reg(s
, rn
);
9662 case 0x0a: /* rbit */
9663 gen_helper_rbit(tmp
, tmp
);
9665 case 0x08: /* rev */
9666 tcg_gen_bswap32_i32(tmp
, tmp
);
9668 case 0x09: /* rev16 */
9671 case 0x0b: /* revsh */
9674 case 0x10: /* sel */
9675 tmp2
= load_reg(s
, rm
);
9676 tmp3
= tcg_temp_new_i32();
9677 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
9678 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
9679 tcg_temp_free_i32(tmp3
);
9680 tcg_temp_free_i32(tmp2
);
9682 case 0x18: /* clz */
9683 gen_helper_clz(tmp
, tmp
);
9693 uint32_t sz
= op
& 0x3;
9694 uint32_t c
= op
& 0x8;
9696 tmp2
= load_reg(s
, rm
);
9698 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
9699 } else if (sz
== 1) {
9700 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
9702 tmp3
= tcg_const_i32(1 << sz
);
9704 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
9706 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
9708 tcg_temp_free_i32(tmp2
);
9709 tcg_temp_free_i32(tmp3
);
9713 g_assert_not_reached();
9716 store_reg(s
, rd
, tmp
);
9718 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9719 switch ((insn
>> 20) & 7) {
9720 case 0: /* 32 x 32 -> 32 */
9721 case 7: /* Unsigned sum of absolute differences. */
9723 case 1: /* 16 x 16 -> 32 */
9724 case 2: /* Dual multiply add. */
9725 case 3: /* 32 * 16 -> 32msb */
9726 case 4: /* Dual multiply subtract. */
9727 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9728 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9733 op
= (insn
>> 4) & 0xf;
9734 tmp
= load_reg(s
, rn
);
9735 tmp2
= load_reg(s
, rm
);
9736 switch ((insn
>> 20) & 7) {
9737 case 0: /* 32 x 32 -> 32 */
9738 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9739 tcg_temp_free_i32(tmp2
);
9741 tmp2
= load_reg(s
, rs
);
9743 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
9745 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9746 tcg_temp_free_i32(tmp2
);
9749 case 1: /* 16 x 16 -> 32 */
9750 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
9751 tcg_temp_free_i32(tmp2
);
9753 tmp2
= load_reg(s
, rs
);
9754 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9755 tcg_temp_free_i32(tmp2
);
9758 case 2: /* Dual multiply add. */
9759 case 4: /* Dual multiply subtract. */
9761 gen_swap_half(tmp2
);
9762 gen_smul_dual(tmp
, tmp2
);
9763 if (insn
& (1 << 22)) {
9764 /* This subtraction cannot overflow. */
9765 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9767 /* This addition cannot overflow 32 bits;
9768 * however it may overflow considered as a signed
9769 * operation, in which case we must set the Q flag.
9771 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9773 tcg_temp_free_i32(tmp2
);
9776 tmp2
= load_reg(s
, rs
);
9777 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9778 tcg_temp_free_i32(tmp2
);
9781 case 3: /* 32 * 16 -> 32msb */
9783 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
9786 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9787 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
9788 tmp
= tcg_temp_new_i32();
9789 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9790 tcg_temp_free_i64(tmp64
);
9793 tmp2
= load_reg(s
, rs
);
9794 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9795 tcg_temp_free_i32(tmp2
);
9798 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9799 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9801 tmp
= load_reg(s
, rs
);
9802 if (insn
& (1 << 20)) {
9803 tmp64
= gen_addq_msw(tmp64
, tmp
);
9805 tmp64
= gen_subq_msw(tmp64
, tmp
);
9808 if (insn
& (1 << 4)) {
9809 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
9811 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
9812 tmp
= tcg_temp_new_i32();
9813 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9814 tcg_temp_free_i64(tmp64
);
9816 case 7: /* Unsigned sum of absolute differences. */
9817 gen_helper_usad8(tmp
, tmp
, tmp2
);
9818 tcg_temp_free_i32(tmp2
);
9820 tmp2
= load_reg(s
, rs
);
9821 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9822 tcg_temp_free_i32(tmp2
);
9826 store_reg(s
, rd
, tmp
);
9828 case 6: case 7: /* 64-bit multiply, Divide. */
9829 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
9830 tmp
= load_reg(s
, rn
);
9831 tmp2
= load_reg(s
, rm
);
9832 if ((op
& 0x50) == 0x10) {
9834 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DIV
)) {
9838 gen_helper_udiv(tmp
, tmp
, tmp2
);
9840 gen_helper_sdiv(tmp
, tmp
, tmp2
);
9841 tcg_temp_free_i32(tmp2
);
9842 store_reg(s
, rd
, tmp
);
9843 } else if ((op
& 0xe) == 0xc) {
9844 /* Dual multiply accumulate long. */
9845 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9846 tcg_temp_free_i32(tmp
);
9847 tcg_temp_free_i32(tmp2
);
9851 gen_swap_half(tmp2
);
9852 gen_smul_dual(tmp
, tmp2
);
9854 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9856 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9858 tcg_temp_free_i32(tmp2
);
9860 tmp64
= tcg_temp_new_i64();
9861 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9862 tcg_temp_free_i32(tmp
);
9863 gen_addq(s
, tmp64
, rs
, rd
);
9864 gen_storeq_reg(s
, rs
, rd
, tmp64
);
9865 tcg_temp_free_i64(tmp64
);
9868 /* Unsigned 64-bit multiply */
9869 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
9873 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9874 tcg_temp_free_i32(tmp2
);
9875 tcg_temp_free_i32(tmp
);
9878 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
9879 tcg_temp_free_i32(tmp2
);
9880 tmp64
= tcg_temp_new_i64();
9881 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9882 tcg_temp_free_i32(tmp
);
9884 /* Signed 64-bit multiply */
9885 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9890 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9891 tcg_temp_free_i64(tmp64
);
9894 gen_addq_lo(s
, tmp64
, rs
);
9895 gen_addq_lo(s
, tmp64
, rd
);
9896 } else if (op
& 0x40) {
9897 /* 64-bit accumulate. */
9898 gen_addq(s
, tmp64
, rs
, rd
);
9900 gen_storeq_reg(s
, rs
, rd
, tmp64
);
9901 tcg_temp_free_i64(tmp64
);
9906 case 6: case 7: case 14: case 15:
9908 if (((insn
>> 24) & 3) == 3) {
9909 /* Translate into the equivalent ARM encoding. */
9910 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
9911 if (disas_neon_data_insn(s
, insn
)) {
9914 } else if (((insn
>> 8) & 0xe) == 10) {
9915 if (disas_vfp_insn(s
, insn
)) {
9919 if (insn
& (1 << 28))
9921 if (disas_coproc_insn(s
, insn
)) {
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control. */
            if (insn & 0x5000) {
                /* Unconditional branch. */
                /* signextend(hw1[10:0]) -> offset[:12]. */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1]. */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above. */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
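                /* offset now holds the complete signed displacement of the
                 * 32-bit branch, ready to be added to the PC.
                 */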
9941 if (insn
& (1 << 14)) {
9942 /* Branch and link. */
9943 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
9947 if (insn
& (1 << 12)) {
9952 offset
&= ~(uint32_t)2;
9953 /* thumb2 bx, no need to check */
9954 gen_bx_im(s
, offset
);
9956 } else if (((insn
>> 23) & 7) == 7) {
9958 if (insn
& (1 << 13))
9961 if (insn
& (1 << 26)) {
9962 if (!(insn
& (1 << 20))) {
9963 /* Hypervisor call (v7) */
9964 int imm16
= extract32(insn
, 16, 4) << 12
9965 | extract32(insn
, 0, 12);
9972 /* Secure monitor call (v6+) */
9980 op
= (insn
>> 20) & 7;
9982 case 0: /* msr cpsr. */
9983 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
9984 tmp
= load_reg(s
, rn
);
9985 addr
= tcg_const_i32(insn
& 0xff);
9986 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9987 tcg_temp_free_i32(addr
);
9988 tcg_temp_free_i32(tmp
);
9993 case 1: /* msr spsr. */
9994 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
9997 tmp
= load_reg(s
, rn
);
9999 msr_mask(s
, (insn
>> 8) & 0xf, op
== 1),
10003 case 2: /* cps, nop-hint. */
10004 if (((insn
>> 8) & 7) == 0) {
10005 gen_nop_hint(s
, insn
& 0xff);
10007 /* Implemented as NOP in user mode. */
10012 if (insn
& (1 << 10)) {
10013 if (insn
& (1 << 7))
10015 if (insn
& (1 << 6))
10017 if (insn
& (1 << 5))
10019 if (insn
& (1 << 9))
10020 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
10022 if (insn
& (1 << 8)) {
10024 imm
|= (insn
& 0x1f);
10027 gen_set_psr_im(s
, offset
, 0, imm
);
10030 case 3: /* Special control operations. */
10032 op
= (insn
>> 4) & 0xf;
10034 case 2: /* clrex */
10039 /* These execute as NOPs. */
10042 /* We need to break the TB after this insn
10043 * to execute self-modifying code correctly
10044 * and also to take any pending interrupts
10054 /* Trivial implementation equivalent to bx. */
10055 tmp
= load_reg(s
, rn
);
10058 case 5: /* Exception return. */
10062 if (rn
!= 14 || rd
!= 15) {
10065 tmp
= load_reg(s
, rn
);
10066 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
10067 gen_exception_return(s
, tmp
);
10069 case 6: /* mrs cpsr. */
10070 tmp
= tcg_temp_new_i32();
10071 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10072 addr
= tcg_const_i32(insn
& 0xff);
10073 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
10074 tcg_temp_free_i32(addr
);
10076 gen_helper_cpsr_read(tmp
, cpu_env
);
10078 store_reg(s
, rd
, tmp
);
10080 case 7: /* mrs spsr. */
10081 /* Not accessible in user mode. */
10082 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
10085 tmp
= load_cpu_field(spsr
);
10086 store_reg(s
, rd
, tmp
);
10091 /* Conditional branch. */
10092 op
= (insn
>> 22) & 0xf;
10093 /* Generate a conditional jump to next instruction. */
10094 s
->condlabel
= gen_new_label();
10095 arm_gen_test_cc(op
^ 1, s
->condlabel
);
10098 /* offset[11:1] = insn[10:0] */
10099 offset
= (insn
& 0x7ff) << 1;
10100 /* offset[17:12] = insn[21:16]. */
10101 offset
|= (insn
& 0x003f0000) >> 4;
10102 /* offset[31:20] = insn[26]. */
10103 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
10104 /* offset[18] = insn[13]. */
10105 offset
|= (insn
& (1 << 13)) << 5;
10106 /* offset[19] = insn[11]. */
10107 offset
|= (insn
& (1 << 11)) << 8;
10109 /* jump to the offset */
10110 gen_jmp(s
, s
->pc
+ offset
);
10113 /* Data processing immediate. */
10114 if (insn
& (1 << 25)) {
10115 if (insn
& (1 << 24)) {
10116 if (insn
& (1 << 20))
10118 /* Bitfield/Saturate. */
10119 op
= (insn
>> 21) & 7;
10121 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10123 tmp
= tcg_temp_new_i32();
10124 tcg_gen_movi_i32(tmp
, 0);
10126 tmp
= load_reg(s
, rn
);
10129 case 2: /* Signed bitfield extract. */
10131 if (shift
+ imm
> 32)
10134 gen_sbfx(tmp
, shift
, imm
);
10136 case 6: /* Unsigned bitfield extract. */
10138 if (shift
+ imm
> 32)
10141 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
10143 case 3: /* Bitfield insert/clear. */
10146 imm
= imm
+ 1 - shift
;
10148 tmp2
= load_reg(s
, rd
);
10149 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
10150 tcg_temp_free_i32(tmp2
);
10155 default: /* Saturate. */
10158 tcg_gen_sari_i32(tmp
, tmp
, shift
);
10160 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10162 tmp2
= tcg_const_i32(imm
);
10165 if ((op
& 1) && shift
== 0) {
10166 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10167 tcg_temp_free_i32(tmp
);
10168 tcg_temp_free_i32(tmp2
);
10171 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
10173 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
10177 if ((op
& 1) && shift
== 0) {
10178 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10179 tcg_temp_free_i32(tmp
);
10180 tcg_temp_free_i32(tmp2
);
10183 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
10185 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
10188 tcg_temp_free_i32(tmp2
);
10191 store_reg(s
, rd
, tmp
);
10193 imm
= ((insn
& 0x04000000) >> 15)
10194 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
10195 if (insn
& (1 << 22)) {
10196 /* 16-bit immediate. */
10197 imm
|= (insn
>> 4) & 0xf000;
10198 if (insn
& (1 << 23)) {
10200 tmp
= load_reg(s
, rd
);
10201 tcg_gen_ext16u_i32(tmp
, tmp
);
10202 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
10205 tmp
= tcg_temp_new_i32();
10206 tcg_gen_movi_i32(tmp
, imm
);
10209 /* Add/sub 12-bit immediate. */
10211 offset
= s
->pc
& ~(uint32_t)3;
10212 if (insn
& (1 << 23))
10216 tmp
= tcg_temp_new_i32();
10217 tcg_gen_movi_i32(tmp
, offset
);
10219 tmp
= load_reg(s
, rn
);
10220 if (insn
& (1 << 23))
10221 tcg_gen_subi_i32(tmp
, tmp
, imm
);
10223 tcg_gen_addi_i32(tmp
, tmp
, imm
);
10226 store_reg(s
, rd
, tmp
);
10229 int shifter_out
= 0;
10230 /* modified 12-bit immediate. */
10231 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
10232 imm
= (insn
& 0xff);
10235 /* Nothing to do. */
10237 case 1: /* 00XY00XY */
10240 case 2: /* XY00XY00 */
10244 case 3: /* XYXYXYXY */
10248 default: /* Rotated constant. */
10249 shift
= (shift
<< 1) | (imm
>> 7);
10251 imm
= imm
<< (32 - shift
);
10255 tmp2
= tcg_temp_new_i32();
10256 tcg_gen_movi_i32(tmp2
, imm
);
10257 rn
= (insn
>> 16) & 0xf;
10259 tmp
= tcg_temp_new_i32();
10260 tcg_gen_movi_i32(tmp
, 0);
10262 tmp
= load_reg(s
, rn
);
10264 op
= (insn
>> 21) & 0xf;
10265 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
10266 shifter_out
, tmp
, tmp2
))
10268 tcg_temp_free_i32(tmp2
);
10269 rd
= (insn
>> 8) & 0xf;
10271 store_reg(s
, rd
, tmp
);
10273 tcg_temp_free_i32(tmp
);
10278 case 12: /* Load/store single data item. */
10283 if ((insn
& 0x01100000) == 0x01000000) {
10284 if (disas_neon_ls_insn(s
, insn
)) {
10289 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
10291 if (!(insn
& (1 << 20))) {
10295 /* Byte or halfword load space with dest == r15 : memory hints.
10296 * Catch them early so we don't emit pointless addressing code.
10297 * This space is a mix of:
10298 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10299 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10301 * unallocated hints, which must be treated as NOPs
10302 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10303 * which is easiest for the decoding logic
10304 * Some space which must UNDEF
10306 int op1
= (insn
>> 23) & 3;
10307 int op2
= (insn
>> 6) & 0x3f;
10312 /* UNPREDICTABLE, unallocated hint or
10313 * PLD/PLDW/PLI (literal)
10318 return 0; /* PLD/PLDW/PLI or unallocated hint */
10320 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
10321 return 0; /* PLD/PLDW/PLI or unallocated hint */
10323 /* UNDEF space, or an UNPREDICTABLE */
10327 memidx
= get_mem_index(s
);
10329 addr
= tcg_temp_new_i32();
10331 /* s->pc has already been incremented by 4. */
10332 imm
= s
->pc
& 0xfffffffc;
10333 if (insn
& (1 << 23))
10334 imm
+= insn
& 0xfff;
10336 imm
-= insn
& 0xfff;
10337 tcg_gen_movi_i32(addr
, imm
);
10339 addr
= load_reg(s
, rn
);
10340 if (insn
& (1 << 23)) {
10341 /* Positive offset. */
10342 imm
= insn
& 0xfff;
10343 tcg_gen_addi_i32(addr
, addr
, imm
);
10346 switch ((insn
>> 8) & 0xf) {
10347 case 0x0: /* Shifted Register. */
10348 shift
= (insn
>> 4) & 0xf;
10350 tcg_temp_free_i32(addr
);
10353 tmp
= load_reg(s
, rm
);
10355 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10356 tcg_gen_add_i32(addr
, addr
, tmp
);
10357 tcg_temp_free_i32(tmp
);
10359 case 0xc: /* Negative offset. */
10360 tcg_gen_addi_i32(addr
, addr
, -imm
);
10362 case 0xe: /* User privilege. */
10363 tcg_gen_addi_i32(addr
, addr
, imm
);
10364 memidx
= get_a32_user_mem_index(s
);
10366 case 0x9: /* Post-decrement. */
10368 /* Fall through. */
10369 case 0xb: /* Post-increment. */
10373 case 0xd: /* Pre-decrement. */
10375 /* Fall through. */
10376 case 0xf: /* Pre-increment. */
10377 tcg_gen_addi_i32(addr
, addr
, imm
);
10381 tcg_temp_free_i32(addr
);
10386 if (insn
& (1 << 20)) {
10388 tmp
= tcg_temp_new_i32();
10391 gen_aa32_ld8u(tmp
, addr
, memidx
);
10394 gen_aa32_ld8s(tmp
, addr
, memidx
);
10397 gen_aa32_ld16u(tmp
, addr
, memidx
);
10400 gen_aa32_ld16s(tmp
, addr
, memidx
);
10403 gen_aa32_ld32u(tmp
, addr
, memidx
);
10406 tcg_temp_free_i32(tmp
);
10407 tcg_temp_free_i32(addr
);
10413 store_reg(s
, rs
, tmp
);
10417 tmp
= load_reg(s
, rs
);
10420 gen_aa32_st8(tmp
, addr
, memidx
);
10423 gen_aa32_st16(tmp
, addr
, memidx
);
10426 gen_aa32_st32(tmp
, addr
, memidx
);
10429 tcg_temp_free_i32(tmp
);
10430 tcg_temp_free_i32(addr
);
10433 tcg_temp_free_i32(tmp
);
10436 tcg_gen_addi_i32(addr
, addr
, imm
);
10438 store_reg(s
, rn
, addr
);
10440 tcg_temp_free_i32(addr
);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);

    switch (insn >> 12) {
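    /* The top four bits of the halfword select one of the sixteen Thumb-1
     * encoding groups handled by the cases below.
     */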
10477 op
= (insn
>> 11) & 3;
10480 rn
= (insn
>> 3) & 7;
10481 tmp
= load_reg(s
, rn
);
10482 if (insn
& (1 << 10)) {
10484 tmp2
= tcg_temp_new_i32();
10485 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
10488 rm
= (insn
>> 6) & 7;
10489 tmp2
= load_reg(s
, rm
);
10491 if (insn
& (1 << 9)) {
10492 if (s
->condexec_mask
)
10493 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10495 gen_sub_CC(tmp
, tmp
, tmp2
);
10497 if (s
->condexec_mask
)
10498 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10500 gen_add_CC(tmp
, tmp
, tmp2
);
10502 tcg_temp_free_i32(tmp2
);
10503 store_reg(s
, rd
, tmp
);
10505 /* shift immediate */
10506 rm
= (insn
>> 3) & 7;
10507 shift
= (insn
>> 6) & 0x1f;
10508 tmp
= load_reg(s
, rm
);
10509 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
10510 if (!s
->condexec_mask
)
10512 store_reg(s
, rd
, tmp
);
10516 /* arithmetic large immediate */
10517 op
= (insn
>> 11) & 3;
10518 rd
= (insn
>> 8) & 0x7;
10519 if (op
== 0) { /* mov */
10520 tmp
= tcg_temp_new_i32();
10521 tcg_gen_movi_i32(tmp
, insn
& 0xff);
10522 if (!s
->condexec_mask
)
10524 store_reg(s
, rd
, tmp
);
10526 tmp
= load_reg(s
, rd
);
10527 tmp2
= tcg_temp_new_i32();
10528 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
10531 gen_sub_CC(tmp
, tmp
, tmp2
);
10532 tcg_temp_free_i32(tmp
);
10533 tcg_temp_free_i32(tmp2
);
10536 if (s
->condexec_mask
)
10537 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10539 gen_add_CC(tmp
, tmp
, tmp2
);
10540 tcg_temp_free_i32(tmp2
);
10541 store_reg(s
, rd
, tmp
);
10544 if (s
->condexec_mask
)
10545 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10547 gen_sub_CC(tmp
, tmp
, tmp2
);
10548 tcg_temp_free_i32(tmp2
);
10549 store_reg(s
, rd
, tmp
);
10555 if (insn
& (1 << 11)) {
10556 rd
= (insn
>> 8) & 7;
10557 /* load pc-relative. Bit 1 of PC is ignored. */
10558 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
10559 val
&= ~(uint32_t)2;
10560 addr
= tcg_temp_new_i32();
10561 tcg_gen_movi_i32(addr
, val
);
10562 tmp
= tcg_temp_new_i32();
10563 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
10564 tcg_temp_free_i32(addr
);
10565 store_reg(s
, rd
, tmp
);
10568 if (insn
& (1 << 10)) {
10569 /* data processing extended or blx */
10570 rd
= (insn
& 7) | ((insn
>> 4) & 8);
10571 rm
= (insn
>> 3) & 0xf;
10572 op
= (insn
>> 8) & 3;
10575 tmp
= load_reg(s
, rd
);
10576 tmp2
= load_reg(s
, rm
);
10577 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10578 tcg_temp_free_i32(tmp2
);
10579 store_reg(s
, rd
, tmp
);
10582 tmp
= load_reg(s
, rd
);
10583 tmp2
= load_reg(s
, rm
);
10584 gen_sub_CC(tmp
, tmp
, tmp2
);
10585 tcg_temp_free_i32(tmp2
);
10586 tcg_temp_free_i32(tmp
);
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    /* already thumb, no need to check */
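                    /* BLX: the return address (the insn following this one)
                     * is written to r14 with bit 0 set so that the link
                     * register records Thumb state for the eventual return.
                     */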
        /* data processing register */
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            store_reg(s, rm, tmp2);
            tcg_temp_free_i32(tmp);
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        /* load/store register offset.  */
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
            tmp = tcg_temp_new_i32();
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            gen_aa32_st8(tmp, addr, get_mem_index(s));
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, get_mem_index(s));
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, get_mem_index(s));
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);
        /* load/store word immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        /* load/store byte immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        /* load/store halfword immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
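        /* In these three immediate forms the offset is the 5-bit field in
         * insn[10:6] scaled by the access size: (insn >> 4) & 0x7c is
         * imm5 * 4 for words, (insn >> 5) & 0x3e is imm5 * 2 for halfwords,
         * and (insn >> 6) & 0x1f is imm5 for bytes.
         */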
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        op = (insn >> 8) & 0xf;
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
        case 2: /* sign/zero extend.  */
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
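                /* POP {...,pc}: only now is the loaded value written to the
                 * PC; store_reg_from_load() does an interworking branch on
                 * cores that support it, so bit 0 of the value selects the
                 * Thumb bit.
                 */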
        case 1: case 3: case 9: case 11: /* czb */
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
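            /* CBZ/CBNZ branch target: the offset is i:imm5:'0', i.e.
             * ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3) bytes, applied
             * to a base of this insn's address + 4 (s->pc + 2 here).
             */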
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
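            /* condexec_cond holds firstcond[3:1]; condexec_mask holds
             * firstcond[0] followed by the 4-bit IT mask, a 5-bit field
             * that is shifted left as the block is translated (see the
             * advance logic in gen_intermediate_code()).
             */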
        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
        case 0xa: /* rev */
            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            switch ((insn >> 5) & 7) {
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    addr = tcg_const_i32(19);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                    tcg_temp_free_i32(tmp);
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    store_reg(s, i, tmp);
                } else {
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
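        /* When the base register is also in the register list, an LDM must
         * not clobber it until the whole transfer has completed: the value
         * loaded for that register is parked in loaded_var during the loop
         * and only written back here.
         */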
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
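        /* B<cond>: imm8 is sign-extended, shifted left one bit and added to
         * the address of this insn + 4 (s->pc + 2 at this point).
         */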
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        if (disas_thumb2_insn(env, s, insn))
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     */
    uint16_t insn;

    if ((s->pc & 3) == 0) {
        /* At a 4-aligned address we can't be crossing a page */
        return false;
    }

    /* This must be a Thumb insn */
    insn = arm_lduw_code(env, s->pc, s->bswap_code);

    if ((insn >> 11) >= 0x1d) {
        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
         * First half of a 32-bit Thumb insn. Thumb-1 cores might
         * end up actually treating this as two 16-bit insns (see the
         * code at the start of disas_thumb2_insn()) but we don't bother
         * to check for that as it is unlikely, and false positives here
         * are harmless.
         */
        return true;
    }
    /* Definitely a 16-bit insn, can't be crossing a page.  */
    return false;
}
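/* Note on insn_crosses_page(): the interesting case is s->pc & 3 == 2, e.g.
 * a 32-bit Thumb insn whose first halfword is the last halfword of a page
 * and whose second halfword starts the next page.
 */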
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }
    dc->is_jmp = DISAS_NEXT;
    dc->singlestep_enabled = cs->singlestep_enabled;

    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;
    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
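        /* The second insn_start argument packs the IT state in the same
         * layout as env->condexec_bits, which is what restore_state_to_opc()
         * writes back (data[1]) after an unexpected exit such as a fault.
         */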
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing.  */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        goto done_generating;
                    }
                }
            }
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
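            /* Advance the IT state: condexec_mask is a 5-bit field
             * (firstcond[0] followed by the 4-bit IT mask) that shifts left
             * once per insn, and the bit shifted out of the top supplies
             * the low bit of condexec_cond for the next insn in the block;
             * a mask of zero means the IT block has ended.
             */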
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            disas_arm_insn(dc, insn);

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        /* FIXME: This can theoretically happen with self-modifying
           code.  */
        cpu_abort(cs, "IO on conditional branch instruction");
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath.  */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        /* "Condition failed" instruction codepath.  */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        if (dc->ss_active) {
            gen_step_complete_exception(dc);
        } else {
            gen_exception_internal(EXCP_DEBUG);
        }
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
            gen_goto_tb(dc, 1, dc->pc);
            gen_set_pc_im(dc, dc->pc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            gen_helper_wfe(cpu_env);
            gen_helper_yield(cpu_env);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_goto_tb(dc, 1, dc->pc);
    gen_tb_end(tb, num_insns);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
    }
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;
    const char *ns_status;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        (psr & CPSR_M) != ARM_CPU_MODE_MON) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                ns_status,
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
    }
}
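/* The data[] slots mirror the arguments passed to tcg_gen_insn_start()
 * during translation: data[0] is the PC, and for AArch32 TBs data[1] is the
 * packed IT/condexec state live at that insn.
 */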