 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "internals.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define CONFIG_ALIGNMENT_EXCEPTIONS 1

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
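/* For example, a decoder case for an instruction that needs Thumb-2 can
 * begin with ARCH(6T2); if the CPU lacks ARM_FEATURE_THUMB2 the macro
 * branches to the decoder's illegal_op label, so the encoding is treated
 * as UNDEFINED on earlier cores. */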
#include "translate.h"
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
TCGv_i64 cpu_exclusive_test;
TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
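/* Note that store_cpu_offset() consumes its value operand: a typical
 * sequence (as used by gen_bx() later in this file) allocates a temporary,
 * computes into it and then calls store_cpu_field(tmp, thumb), which writes
 * env->thumb and frees the temporary. */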
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
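/* store_reg() likewise frees its source temporary, so callers must not
 * reuse the value after the call; storing to r15 also clears bit 0 and
 * ends the translation block (DISAS_JUMP). */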
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
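/* On return from gen_smul_dual(), a holds the signed product of the two low
 * halfwords and b the signed product of the two high halfwords; callers
 * presumably add or subtract the pair for the dual-multiply (SMUAD/SMUSD
 * style) operations. */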
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
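/* The V flag above is computed with the usual identity for addition:
 * overflow = (result ^ t0) & ~(t0 ^ t1), i.e. the operands had equal sign
 * bits and the result's sign differs; only bit 31 of cpu_VF is meaningful. */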
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
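/* gen_sbc_CC() relies on the identity T0 - T1 - !C == T0 + ~T1 + C, so the
 * subtract-with-carry case shares all of gen_adc_CC()'s flag logic. */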
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
569 /* Shift by immediate. Includes special handling for shift == 0. */
570 static inline void gen_arm_shift_im(TCGv_i32 var
, int shiftop
,
571 int shift
, int flags
)
577 shifter_out_im(var
, 32 - shift
);
578 tcg_gen_shli_i32(var
, var
, shift
);
584 tcg_gen_shri_i32(cpu_CF
, var
, 31);
586 tcg_gen_movi_i32(var
, 0);
589 shifter_out_im(var
, shift
- 1);
590 tcg_gen_shri_i32(var
, var
, shift
);
597 shifter_out_im(var
, shift
- 1);
600 tcg_gen_sari_i32(var
, var
, shift
);
602 case 3: /* ROR/RRX */
605 shifter_out_im(var
, shift
- 1);
606 tcg_gen_rotri_i32(var
, var
, shift
); break;
608 TCGv_i32 tmp
= tcg_temp_new_i32();
609 tcg_gen_shli_i32(tmp
, cpu_CF
, 31);
611 shifter_out_im(var
, 0);
612 tcg_gen_shri_i32(var
, var
, 1);
613 tcg_gen_or_i32(var
, var
, tmp
);
614 tcg_temp_free_i32(tmp
);
619 static inline void gen_arm_shift_reg(TCGv_i32 var
, int shiftop
,
620 TCGv_i32 shift
, int flags
)
624 case 0: gen_helper_shl_cc(var
, cpu_env
, var
, shift
); break;
625 case 1: gen_helper_shr_cc(var
, cpu_env
, var
, shift
); break;
626 case 2: gen_helper_sar_cc(var
, cpu_env
, var
, shift
); break;
627 case 3: gen_helper_ror_cc(var
, cpu_env
, var
, shift
); break;
632 gen_shl(var
, var
, shift
);
635 gen_shr(var
, var
, shift
);
638 gen_sar(var
, var
, shift
);
640 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
641 tcg_gen_rotr_i32(var
, var
, shift
); break;
644 tcg_temp_free_i32(shift
);
647 #define PAS_OP(pfx) \
649 case 0: gen_pas_helper(glue(pfx,add16)); break; \
650 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
651 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
652 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
653 case 4: gen_pas_helper(glue(pfx,add8)); break; \
654 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
656 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
661 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
663 tmp
= tcg_temp_new_ptr();
664 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
666 tcg_temp_free_ptr(tmp
);
669 tmp
= tcg_temp_new_ptr();
670 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
672 tcg_temp_free_ptr(tmp
);
674 #undef gen_pas_helper
675 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
688 #undef gen_pas_helper
693 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
694 #define PAS_OP(pfx) \
696 case 0: gen_pas_helper(glue(pfx,add8)); break; \
697 case 1: gen_pas_helper(glue(pfx,add16)); break; \
698 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
699 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
700 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
701 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
703 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
708 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
710 tmp
= tcg_temp_new_ptr();
711 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
713 tcg_temp_free_ptr(tmp
);
716 tmp
= tcg_temp_new_ptr();
717 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
719 tcg_temp_free_ptr(tmp
);
721 #undef gen_pas_helper
722 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
735 #undef gen_pas_helper
741 * Generate a conditional based on ARM condition code cc.
742 * This is common between ARM and Aarch64 targets.
744 void arm_test_cc(DisasCompare
*cmp
, int cc
)
775 case 8: /* hi: C && !Z */
776 case 9: /* ls: !C || Z -> !(C && !Z) */
778 value
= tcg_temp_new_i32();
780 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
781 ZF is non-zero for !Z; so AND the two subexpressions. */
782 tcg_gen_neg_i32(value
, cpu_CF
);
783 tcg_gen_and_i32(value
, value
, cpu_ZF
);
786 case 10: /* ge: N == V -> N ^ V == 0 */
787 case 11: /* lt: N != V -> N ^ V != 0 */
788 /* Since we're only interested in the sign bit, == 0 is >= 0. */
790 value
= tcg_temp_new_i32();
792 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
795 case 12: /* gt: !Z && N == V */
796 case 13: /* le: Z || N != V */
798 value
= tcg_temp_new_i32();
800 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
801 * the sign bit then AND with ZF to yield the result. */
802 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
803 tcg_gen_sari_i32(value
, value
, 31);
804 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
807 case 14: /* always */
808 case 15: /* always */
809 /* Use the ALWAYS condition, which will fold early.
810 * It doesn't matter what we use for the value. */
811 cond
= TCG_COND_ALWAYS
;
816 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
821 cond
= tcg_invert_cond(cond
);
827 cmp
->value_global
= global
;
830 void arm_free_cc(DisasCompare
*cmp
)
832 if (!cmp
->value_global
) {
833 tcg_temp_free_i32(cmp
->value
);
837 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
839 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
842 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
845 arm_test_cc(&cmp
, cc
);
846 arm_jump_cc(&cmp
, label
);
850 static const uint8_t table_logic_cc
[16] = {
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                                  \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC);                               \
}

#define DO_GEN_ST(SUFF, OPC)                                                  \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    tcg_gen_qemu_st_i32(val, addr, index, OPC);                               \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC)                                                  \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    TCGv addr64 = tcg_temp_new();                                             \
    tcg_gen_extu_i32_i64(addr64, addr);                                       \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                             \
    tcg_temp_free(addr64);                                                    \
}

#define DO_GEN_ST(SUFF, OPC)                                                  \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    TCGv addr64 = tcg_temp_new();                                             \
    tcg_gen_extu_i32_i64(addr64, addr);                                       \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                             \
    tcg_temp_free(addr64);                                                    \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
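/* Each DO_GEN_LD/DO_GEN_ST line above expands to one inline helper; for
 * instance DO_GEN_LD(16u, MO_TEUW) yields gen_aa32_ld16u(val, addr, index),
 * a zero-extending target-endian 16-bit load, where "index" is an MMU index
 * such as get_mem_index(s) (or get_a32_user_mem_index(s) for the
 * unprivileged load/store forms). */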
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
static inline void gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
/* Emit an inline alignment check, which raises an exception if the given
 * address is not aligned according to "size" (which must be a power of 2). */
static void gen_alignment_check(DisasContext *s, int pc_offset,
                                target_ulong size, TCGv addr)
{
#ifdef CONFIG_ALIGNMENT_EXCEPTIONS
    TCGLabel *alignok_label = gen_new_label();
    TCGv tmp = tcg_temp_new();

    /* check alignment, branch to alignok_label if aligned */
    tcg_gen_andi_tl(tmp, addr, size - 1);
    tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, 0, alignok_label);

    /* emit alignment exception */
    gen_set_pc_im(s, s->pc - pc_offset);
    gen_helper_alignment_exception(cpu_env, addr);

    gen_set_label(alignok_label);
    tcg_temp_free(tmp);
#endif
}
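/* Intended use (a sketch inferred from the signature, not a call site in
 * this excerpt): before emitting e.g. a word load at "addr", a caller would
 * do gen_alignment_check(s, 4, 4, addr) so that a misaligned access takes
 * the alignment exception with the PC wound back to the faulting insn. */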
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1091 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
1094 int val
, rm
, shift
, shiftop
;
1097 if (!(insn
& (1 << 25))) {
1100 if (!(insn
& (1 << 23)))
1103 tcg_gen_addi_i32(var
, var
, val
);
1105 /* shift/register */
1107 shift
= (insn
>> 7) & 0x1f;
1108 shiftop
= (insn
>> 5) & 3;
1109 offset
= load_reg(s
, rm
);
1110 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
1111 if (!(insn
& (1 << 23)))
1112 tcg_gen_sub_i32(var
, var
, offset
);
1114 tcg_gen_add_i32(var
, var
, offset
);
1115 tcg_temp_free_i32(offset
);
1119 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
1120 int extra
, TCGv_i32 var
)
1125 if (insn
& (1 << 22)) {
1127 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
1128 if (!(insn
& (1 << 23)))
1132 tcg_gen_addi_i32(var
, var
, val
);
1136 tcg_gen_addi_i32(var
, var
, extra
);
1138 offset
= load_reg(s
, rm
);
1139 if (!(insn
& (1 << 23)))
1140 tcg_gen_sub_i32(var
, var
, offset
);
1142 tcg_gen_add_i32(var
, var
, offset
);
1143 tcg_temp_free_i32(offset
);
1147 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1149 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1152 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1154 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1156 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1160 #define VFP_OP2(name) \
1161 static inline void gen_vfp_##name(int dp) \
1163 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1165 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1167 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1169 tcg_temp_free_ptr(fpst); \
1179 static inline void gen_vfp_F1_mul(int dp
)
1181 /* Like gen_vfp_mul() but put result in F1 */
1182 TCGv_ptr fpst
= get_fpstatus_ptr(0);
1184 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
1186 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
1188 tcg_temp_free_ptr(fpst
);
1191 static inline void gen_vfp_F1_neg(int dp
)
1193 /* Like gen_vfp_neg() but put result in F1 */
1195 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
1197 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
1201 static inline void gen_vfp_abs(int dp
)
1204 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1206 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1209 static inline void gen_vfp_neg(int dp
)
1212 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1214 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1217 static inline void gen_vfp_sqrt(int dp
)
1220 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1222 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1225 static inline void gen_vfp_cmp(int dp
)
1228 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1230 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1233 static inline void gen_vfp_cmpe(int dp
)
1236 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1238 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1241 static inline void gen_vfp_F1_ld0(int dp
)
1244 tcg_gen_movi_i64(cpu_F1d
, 0);
1246 tcg_gen_movi_i32(cpu_F1s
, 0);
1249 #define VFP_GEN_ITOF(name) \
1250 static inline void gen_vfp_##name(int dp, int neon) \
1252 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1254 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1256 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1258 tcg_temp_free_ptr(statusptr); \
1265 #define VFP_GEN_FTOI(name) \
1266 static inline void gen_vfp_##name(int dp, int neon) \
1268 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1270 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1272 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1274 tcg_temp_free_ptr(statusptr); \
1283 #define VFP_GEN_FIX(name, round) \
1284 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1286 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1287 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1289 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1292 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1295 tcg_temp_free_i32(tmp_shift); \
1296 tcg_temp_free_ptr(statusptr); \
1298 VFP_GEN_FIX(tosh
, _round_to_zero
)
1299 VFP_GEN_FIX(tosl
, _round_to_zero
)
1300 VFP_GEN_FIX(touh
, _round_to_zero
)
1301 VFP_GEN_FIX(toul
, _round_to_zero
)
1308 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1311 gen_aa32_ld64(cpu_F0d
, addr
, get_mem_index(s
));
1313 gen_aa32_ld32u(cpu_F0s
, addr
, get_mem_index(s
));
1317 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1320 gen_aa32_st64(cpu_F0d
, addr
, get_mem_index(s
));
1322 gen_aa32_st32(cpu_F0s
, addr
, get_mem_index(s
));
1327 vfp_reg_offset (int dp
, int reg
)
1330 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1332 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1333 + offsetof(CPU_DoubleU
, l
.upper
);
1335 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1336 + offsetof(CPU_DoubleU
, l
.lower
);
1340 /* Return the offset of a 32-bit piece of a NEON register.
1341 zero is the least significant end of the register. */
1343 neon_reg_offset (int reg
, int n
)
1347 return vfp_reg_offset(0, sreg
);
1350 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1352 TCGv_i32 tmp
= tcg_temp_new_i32();
1353 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1357 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1359 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1360 tcg_temp_free_i32(var
);
1363 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1365 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1368 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1370 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1373 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1374 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1375 #define tcg_gen_st_f32 tcg_gen_st_i32
1376 #define tcg_gen_st_f64 tcg_gen_st_i64
1378 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1381 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1383 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1386 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1389 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1391 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1394 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1397 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1399 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1402 #define ARM_CP_RW_BIT (1 << 20)
1404 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1406 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1409 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1411 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1414 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1416 TCGv_i32 var
= tcg_temp_new_i32();
1417 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1421 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1423 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1424 tcg_temp_free_i32(var
);
1427 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1429 iwmmxt_store_reg(cpu_M0
, rn
);
1432 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1434 iwmmxt_load_reg(cpu_M0
, rn
);
1437 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1439 iwmmxt_load_reg(cpu_V1
, rn
);
1440 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1443 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1445 iwmmxt_load_reg(cpu_V1
, rn
);
1446 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1449 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1451 iwmmxt_load_reg(cpu_V1
, rn
);
1452 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1455 #define IWMMXT_OP(name) \
1456 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1458 iwmmxt_load_reg(cpu_V1, rn); \
1459 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1462 #define IWMMXT_OP_ENV(name) \
1463 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1465 iwmmxt_load_reg(cpu_V1, rn); \
1466 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1469 #define IWMMXT_OP_ENV_SIZE(name) \
1470 IWMMXT_OP_ENV(name##b) \
1471 IWMMXT_OP_ENV(name##w) \
1472 IWMMXT_OP_ENV(name##l)
1474 #define IWMMXT_OP_ENV1(name) \
1475 static inline void gen_op_iwmmxt_##name##_M0(void) \
1477 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1491 IWMMXT_OP_ENV_SIZE(unpackl
)
1492 IWMMXT_OP_ENV_SIZE(unpackh
)
1494 IWMMXT_OP_ENV1(unpacklub
)
1495 IWMMXT_OP_ENV1(unpackluw
)
1496 IWMMXT_OP_ENV1(unpacklul
)
1497 IWMMXT_OP_ENV1(unpackhub
)
1498 IWMMXT_OP_ENV1(unpackhuw
)
1499 IWMMXT_OP_ENV1(unpackhul
)
1500 IWMMXT_OP_ENV1(unpacklsb
)
1501 IWMMXT_OP_ENV1(unpacklsw
)
1502 IWMMXT_OP_ENV1(unpacklsl
)
1503 IWMMXT_OP_ENV1(unpackhsb
)
1504 IWMMXT_OP_ENV1(unpackhsw
)
1505 IWMMXT_OP_ENV1(unpackhsl
)
1507 IWMMXT_OP_ENV_SIZE(cmpeq
)
1508 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1509 IWMMXT_OP_ENV_SIZE(cmpgts
)
1511 IWMMXT_OP_ENV_SIZE(mins
)
1512 IWMMXT_OP_ENV_SIZE(minu
)
1513 IWMMXT_OP_ENV_SIZE(maxs
)
1514 IWMMXT_OP_ENV_SIZE(maxu
)
1516 IWMMXT_OP_ENV_SIZE(subn
)
1517 IWMMXT_OP_ENV_SIZE(addn
)
1518 IWMMXT_OP_ENV_SIZE(subu
)
1519 IWMMXT_OP_ENV_SIZE(addu
)
1520 IWMMXT_OP_ENV_SIZE(subs
)
1521 IWMMXT_OP_ENV_SIZE(adds
)
1523 IWMMXT_OP_ENV(avgb0
)
1524 IWMMXT_OP_ENV(avgb1
)
1525 IWMMXT_OP_ENV(avgw0
)
1526 IWMMXT_OP_ENV(avgw1
)
1528 IWMMXT_OP_ENV(packuw
)
1529 IWMMXT_OP_ENV(packul
)
1530 IWMMXT_OP_ENV(packuq
)
1531 IWMMXT_OP_ENV(packsw
)
1532 IWMMXT_OP_ENV(packsl
)
1533 IWMMXT_OP_ENV(packsq
)
1535 static void gen_op_iwmmxt_set_mup(void)
1538 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1539 tcg_gen_ori_i32(tmp
, tmp
, 2);
1540 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1543 static void gen_op_iwmmxt_set_cup(void)
1546 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1547 tcg_gen_ori_i32(tmp
, tmp
, 1);
1548 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1551 static void gen_op_iwmmxt_setpsr_nz(void)
1553 TCGv_i32 tmp
= tcg_temp_new_i32();
1554 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1555 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1558 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1560 iwmmxt_load_reg(cpu_V1
, rn
);
1561 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1562 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1565 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1572 rd
= (insn
>> 16) & 0xf;
1573 tmp
= load_reg(s
, rd
);
1575 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1576 if (insn
& (1 << 24)) {
1578 if (insn
& (1 << 23))
1579 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1581 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1582 tcg_gen_mov_i32(dest
, tmp
);
1583 if (insn
& (1 << 21))
1584 store_reg(s
, rd
, tmp
);
1586 tcg_temp_free_i32(tmp
);
1587 } else if (insn
& (1 << 21)) {
1589 tcg_gen_mov_i32(dest
, tmp
);
1590 if (insn
& (1 << 23))
1591 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1593 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1594 store_reg(s
, rd
, tmp
);
1595 } else if (!(insn
& (1 << 23)))
1600 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1602 int rd
= (insn
>> 0) & 0xf;
1605 if (insn
& (1 << 8)) {
1606 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1609 tmp
= iwmmxt_load_creg(rd
);
1612 tmp
= tcg_temp_new_i32();
1613 iwmmxt_load_reg(cpu_V0
, rd
);
1614 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1616 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1617 tcg_gen_mov_i32(dest
, tmp
);
1618 tcg_temp_free_i32(tmp
);
1622 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1623 (ie. an undefined instruction). */
1624 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1627 int rdhi
, rdlo
, rd0
, rd1
, i
;
1629 TCGv_i32 tmp
, tmp2
, tmp3
;
1631 if ((insn
& 0x0e000e00) == 0x0c000000) {
1632 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1634 rdlo
= (insn
>> 12) & 0xf;
1635 rdhi
= (insn
>> 16) & 0xf;
1636 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1637 iwmmxt_load_reg(cpu_V0
, wrd
);
1638 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1639 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1640 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1641 } else { /* TMCRR */
1642 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1643 iwmmxt_store_reg(cpu_V0
, wrd
);
1644 gen_op_iwmmxt_set_mup();
1649 wrd
= (insn
>> 12) & 0xf;
1650 addr
= tcg_temp_new_i32();
1651 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1652 tcg_temp_free_i32(addr
);
1655 if (insn
& ARM_CP_RW_BIT
) {
1656 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1657 tmp
= tcg_temp_new_i32();
1658 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
1659 iwmmxt_store_creg(wrd
, tmp
);
1662 if (insn
& (1 << 8)) {
1663 if (insn
& (1 << 22)) { /* WLDRD */
1664 gen_aa32_ld64(cpu_M0
, addr
, get_mem_index(s
));
1666 } else { /* WLDRW wRd */
1667 tmp
= tcg_temp_new_i32();
1668 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
1671 tmp
= tcg_temp_new_i32();
1672 if (insn
& (1 << 22)) { /* WLDRH */
1673 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
1674 } else { /* WLDRB */
1675 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
1679 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1680 tcg_temp_free_i32(tmp
);
1682 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1685 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1686 tmp
= iwmmxt_load_creg(wrd
);
1687 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
1689 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1690 tmp
= tcg_temp_new_i32();
1691 if (insn
& (1 << 8)) {
1692 if (insn
& (1 << 22)) { /* WSTRD */
1693 gen_aa32_st64(cpu_M0
, addr
, get_mem_index(s
));
1694 } else { /* WSTRW wRd */
1695 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1696 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
1699 if (insn
& (1 << 22)) { /* WSTRH */
1700 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1701 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
1702 } else { /* WSTRB */
1703 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1704 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
1708 tcg_temp_free_i32(tmp
);
1710 tcg_temp_free_i32(addr
);
1714 if ((insn
& 0x0f000000) != 0x0e000000)
1717 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1718 case 0x000: /* WOR */
1719 wrd
= (insn
>> 12) & 0xf;
1720 rd0
= (insn
>> 0) & 0xf;
1721 rd1
= (insn
>> 16) & 0xf;
1722 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1723 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1724 gen_op_iwmmxt_setpsr_nz();
1725 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1726 gen_op_iwmmxt_set_mup();
1727 gen_op_iwmmxt_set_cup();
1729 case 0x011: /* TMCR */
1732 rd
= (insn
>> 12) & 0xf;
1733 wrd
= (insn
>> 16) & 0xf;
1735 case ARM_IWMMXT_wCID
:
1736 case ARM_IWMMXT_wCASF
:
1738 case ARM_IWMMXT_wCon
:
1739 gen_op_iwmmxt_set_cup();
1741 case ARM_IWMMXT_wCSSF
:
1742 tmp
= iwmmxt_load_creg(wrd
);
1743 tmp2
= load_reg(s
, rd
);
1744 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1745 tcg_temp_free_i32(tmp2
);
1746 iwmmxt_store_creg(wrd
, tmp
);
1748 case ARM_IWMMXT_wCGR0
:
1749 case ARM_IWMMXT_wCGR1
:
1750 case ARM_IWMMXT_wCGR2
:
1751 case ARM_IWMMXT_wCGR3
:
1752 gen_op_iwmmxt_set_cup();
1753 tmp
= load_reg(s
, rd
);
1754 iwmmxt_store_creg(wrd
, tmp
);
1760 case 0x100: /* WXOR */
1761 wrd
= (insn
>> 12) & 0xf;
1762 rd0
= (insn
>> 0) & 0xf;
1763 rd1
= (insn
>> 16) & 0xf;
1764 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1765 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1766 gen_op_iwmmxt_setpsr_nz();
1767 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1768 gen_op_iwmmxt_set_mup();
1769 gen_op_iwmmxt_set_cup();
1771 case 0x111: /* TMRC */
1774 rd
= (insn
>> 12) & 0xf;
1775 wrd
= (insn
>> 16) & 0xf;
1776 tmp
= iwmmxt_load_creg(wrd
);
1777 store_reg(s
, rd
, tmp
);
1779 case 0x300: /* WANDN */
1780 wrd
= (insn
>> 12) & 0xf;
1781 rd0
= (insn
>> 0) & 0xf;
1782 rd1
= (insn
>> 16) & 0xf;
1783 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1784 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1785 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1786 gen_op_iwmmxt_setpsr_nz();
1787 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1788 gen_op_iwmmxt_set_mup();
1789 gen_op_iwmmxt_set_cup();
1791 case 0x200: /* WAND */
1792 wrd
= (insn
>> 12) & 0xf;
1793 rd0
= (insn
>> 0) & 0xf;
1794 rd1
= (insn
>> 16) & 0xf;
1795 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1796 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1797 gen_op_iwmmxt_setpsr_nz();
1798 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1799 gen_op_iwmmxt_set_mup();
1800 gen_op_iwmmxt_set_cup();
1802 case 0x810: case 0xa10: /* WMADD */
1803 wrd
= (insn
>> 12) & 0xf;
1804 rd0
= (insn
>> 0) & 0xf;
1805 rd1
= (insn
>> 16) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1807 if (insn
& (1 << 21))
1808 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1810 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1811 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1812 gen_op_iwmmxt_set_mup();
1814 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1815 wrd
= (insn
>> 12) & 0xf;
1816 rd0
= (insn
>> 16) & 0xf;
1817 rd1
= (insn
>> 0) & 0xf;
1818 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1819 switch ((insn
>> 22) & 3) {
1821 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1824 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1827 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1832 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1833 gen_op_iwmmxt_set_mup();
1834 gen_op_iwmmxt_set_cup();
1836 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1837 wrd
= (insn
>> 12) & 0xf;
1838 rd0
= (insn
>> 16) & 0xf;
1839 rd1
= (insn
>> 0) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1841 switch ((insn
>> 22) & 3) {
1843 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1846 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1849 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1854 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1855 gen_op_iwmmxt_set_mup();
1856 gen_op_iwmmxt_set_cup();
1858 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1859 wrd
= (insn
>> 12) & 0xf;
1860 rd0
= (insn
>> 16) & 0xf;
1861 rd1
= (insn
>> 0) & 0xf;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1863 if (insn
& (1 << 22))
1864 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1866 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1867 if (!(insn
& (1 << 20)))
1868 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1869 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1870 gen_op_iwmmxt_set_mup();
1872 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1873 wrd
= (insn
>> 12) & 0xf;
1874 rd0
= (insn
>> 16) & 0xf;
1875 rd1
= (insn
>> 0) & 0xf;
1876 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1877 if (insn
& (1 << 21)) {
1878 if (insn
& (1 << 20))
1879 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1881 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1883 if (insn
& (1 << 20))
1884 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1886 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1888 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1889 gen_op_iwmmxt_set_mup();
1891 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1892 wrd
= (insn
>> 12) & 0xf;
1893 rd0
= (insn
>> 16) & 0xf;
1894 rd1
= (insn
>> 0) & 0xf;
1895 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1896 if (insn
& (1 << 21))
1897 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1899 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1900 if (!(insn
& (1 << 20))) {
1901 iwmmxt_load_reg(cpu_V1
, wrd
);
1902 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1904 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1905 gen_op_iwmmxt_set_mup();
1907 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1908 wrd
= (insn
>> 12) & 0xf;
1909 rd0
= (insn
>> 16) & 0xf;
1910 rd1
= (insn
>> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1912 switch ((insn
>> 22) & 3) {
1914 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1917 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1920 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1925 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1926 gen_op_iwmmxt_set_mup();
1927 gen_op_iwmmxt_set_cup();
1929 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1930 wrd
= (insn
>> 12) & 0xf;
1931 rd0
= (insn
>> 16) & 0xf;
1932 rd1
= (insn
>> 0) & 0xf;
1933 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1934 if (insn
& (1 << 22)) {
1935 if (insn
& (1 << 20))
1936 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1938 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1940 if (insn
& (1 << 20))
1941 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1943 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1945 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1946 gen_op_iwmmxt_set_mup();
1947 gen_op_iwmmxt_set_cup();
1949 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1950 wrd
= (insn
>> 12) & 0xf;
1951 rd0
= (insn
>> 16) & 0xf;
1952 rd1
= (insn
>> 0) & 0xf;
1953 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1954 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1955 tcg_gen_andi_i32(tmp
, tmp
, 7);
1956 iwmmxt_load_reg(cpu_V1
, rd1
);
1957 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1958 tcg_temp_free_i32(tmp
);
1959 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1960 gen_op_iwmmxt_set_mup();
1962 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1963 if (((insn
>> 6) & 3) == 3)
1965 rd
= (insn
>> 12) & 0xf;
1966 wrd
= (insn
>> 16) & 0xf;
1967 tmp
= load_reg(s
, rd
);
1968 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1969 switch ((insn
>> 6) & 3) {
1971 tmp2
= tcg_const_i32(0xff);
1972 tmp3
= tcg_const_i32((insn
& 7) << 3);
1975 tmp2
= tcg_const_i32(0xffff);
1976 tmp3
= tcg_const_i32((insn
& 3) << 4);
1979 tmp2
= tcg_const_i32(0xffffffff);
1980 tmp3
= tcg_const_i32((insn
& 1) << 5);
1983 TCGV_UNUSED_I32(tmp2
);
1984 TCGV_UNUSED_I32(tmp3
);
1986 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1987 tcg_temp_free_i32(tmp3
);
1988 tcg_temp_free_i32(tmp2
);
1989 tcg_temp_free_i32(tmp
);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1991 gen_op_iwmmxt_set_mup();
1993 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1994 rd
= (insn
>> 12) & 0xf;
1995 wrd
= (insn
>> 16) & 0xf;
1996 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1998 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1999 tmp
= tcg_temp_new_i32();
2000 switch ((insn
>> 22) & 3) {
2002 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2003 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2005 tcg_gen_ext8s_i32(tmp
, tmp
);
2007 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2011 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2012 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2014 tcg_gen_ext16s_i32(tmp
, tmp
);
2016 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2020 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2021 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2024 store_reg(s
, rd
, tmp
);
2026 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2027 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2029 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2030 switch ((insn
>> 22) & 3) {
2032 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2035 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2038 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2041 tcg_gen_shli_i32(tmp
, tmp
, 28);
2043 tcg_temp_free_i32(tmp
);
2045 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2046 if (((insn
>> 6) & 3) == 3)
2048 rd
= (insn
>> 12) & 0xf;
2049 wrd
= (insn
>> 16) & 0xf;
2050 tmp
= load_reg(s
, rd
);
2051 switch ((insn
>> 6) & 3) {
2053 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2056 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2059 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2062 tcg_temp_free_i32(tmp
);
2063 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2064 gen_op_iwmmxt_set_mup();
2066 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2067 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2069 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2070 tmp2
= tcg_temp_new_i32();
2071 tcg_gen_mov_i32(tmp2
, tmp
);
2072 switch ((insn
>> 22) & 3) {
2074 for (i
= 0; i
< 7; i
++) {
2075 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2076 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2080 for (i
= 0; i
< 3; i
++) {
2081 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2082 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2086 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2087 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2091 tcg_temp_free_i32(tmp2
);
2092 tcg_temp_free_i32(tmp
);
2094 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2095 wrd
= (insn
>> 12) & 0xf;
2096 rd0
= (insn
>> 16) & 0xf;
2097 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2098 switch ((insn
>> 22) & 3) {
2100 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2103 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2106 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2111 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2112 gen_op_iwmmxt_set_mup();
2114 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2115 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2117 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2118 tmp2
= tcg_temp_new_i32();
2119 tcg_gen_mov_i32(tmp2
, tmp
);
2120 switch ((insn
>> 22) & 3) {
2122 for (i
= 0; i
< 7; i
++) {
2123 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2124 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2128 for (i
= 0; i
< 3; i
++) {
2129 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2130 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2134 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2135 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2139 tcg_temp_free_i32(tmp2
);
2140 tcg_temp_free_i32(tmp
);
2142 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2143 rd
= (insn
>> 12) & 0xf;
2144 rd0
= (insn
>> 16) & 0xf;
2145 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2147 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2148 tmp
= tcg_temp_new_i32();
2149 switch ((insn
>> 22) & 3) {
2151 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2154 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2157 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2160 store_reg(s
, rd
, tmp
);
2162 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2163 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2164 wrd
= (insn
>> 12) & 0xf;
2165 rd0
= (insn
>> 16) & 0xf;
2166 rd1
= (insn
>> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2168 switch ((insn
>> 22) & 3) {
2170 if (insn
& (1 << 21))
2171 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2173 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2176 if (insn
& (1 << 21))
2177 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2179 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2182 if (insn
& (1 << 21))
2183 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2185 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2190 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2194 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2195 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2196 wrd
= (insn
>> 12) & 0xf;
2197 rd0
= (insn
>> 16) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2199 switch ((insn
>> 22) & 3) {
2201 if (insn
& (1 << 21))
2202 gen_op_iwmmxt_unpacklsb_M0();
2204 gen_op_iwmmxt_unpacklub_M0();
2207 if (insn
& (1 << 21))
2208 gen_op_iwmmxt_unpacklsw_M0();
2210 gen_op_iwmmxt_unpackluw_M0();
2213 if (insn
& (1 << 21))
2214 gen_op_iwmmxt_unpacklsl_M0();
2216 gen_op_iwmmxt_unpacklul_M0();
2221 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2222 gen_op_iwmmxt_set_mup();
2223 gen_op_iwmmxt_set_cup();
2225 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2226 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2227 wrd
= (insn
>> 12) & 0xf;
2228 rd0
= (insn
>> 16) & 0xf;
2229 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2230 switch ((insn
>> 22) & 3) {
2232 if (insn
& (1 << 21))
2233 gen_op_iwmmxt_unpackhsb_M0();
2235 gen_op_iwmmxt_unpackhub_M0();
2238 if (insn
& (1 << 21))
2239 gen_op_iwmmxt_unpackhsw_M0();
2241 gen_op_iwmmxt_unpackhuw_M0();
2244 if (insn
& (1 << 21))
2245 gen_op_iwmmxt_unpackhsl_M0();
2247 gen_op_iwmmxt_unpackhul_M0();
2252 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2253 gen_op_iwmmxt_set_mup();
2254 gen_op_iwmmxt_set_cup();
2256 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2257 case 0x214: case 0x614: case 0xa14: case 0xe14:
2258 if (((insn
>> 22) & 3) == 0)
2260 wrd
= (insn
>> 12) & 0xf;
2261 rd0
= (insn
>> 16) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2263 tmp
= tcg_temp_new_i32();
2264 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2265 tcg_temp_free_i32(tmp
);
2268 switch ((insn
>> 22) & 3) {
2270 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2273 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2276 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2279 tcg_temp_free_i32(tmp
);
2280 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2284 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2285 case 0x014: case 0x414: case 0x814: case 0xc14:
2286 if (((insn
>> 22) & 3) == 0)
2288 wrd
= (insn
>> 12) & 0xf;
2289 rd0
= (insn
>> 16) & 0xf;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2291 tmp
= tcg_temp_new_i32();
2292 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2293 tcg_temp_free_i32(tmp
);
2296 switch ((insn
>> 22) & 3) {
2298 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2301 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2304 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2307 tcg_temp_free_i32(tmp
);
2308 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2309 gen_op_iwmmxt_set_mup();
2310 gen_op_iwmmxt_set_cup();
2312 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2313 case 0x114: case 0x514: case 0x914: case 0xd14:
2314 if (((insn
>> 22) & 3) == 0)
2316 wrd
= (insn
>> 12) & 0xf;
2317 rd0
= (insn
>> 16) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2319 tmp
= tcg_temp_new_i32();
2320 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2321 tcg_temp_free_i32(tmp
);
2324 switch ((insn
>> 22) & 3) {
2326 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2329 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2332 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2335 tcg_temp_free_i32(tmp
);
2336 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) { /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else { /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while(0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
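
/* A VFP single-precision register number is a 4-bit field with one extra
 * bit appended as the least significant bit, while a VFP3 double-precision
 * register number uses the extra bit as bit 4 (giving access to D16-D31);
 * the VFP_SREG/VFP_DREG macros above reassemble those two pieces.
 */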
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
2785 static int handle_vsel(uint32_t insn
, uint32_t rd
, uint32_t rn
, uint32_t rm
,
2788 uint32_t cc
= extract32(insn
, 20, 2);
2791 TCGv_i64 frn
, frm
, dest
;
2792 TCGv_i64 tmp
, zero
, zf
, nf
, vf
;
2794 zero
= tcg_const_i64(0);
2796 frn
= tcg_temp_new_i64();
2797 frm
= tcg_temp_new_i64();
2798 dest
= tcg_temp_new_i64();
2800 zf
= tcg_temp_new_i64();
2801 nf
= tcg_temp_new_i64();
2802 vf
= tcg_temp_new_i64();
2804 tcg_gen_extu_i32_i64(zf
, cpu_ZF
);
2805 tcg_gen_ext_i32_i64(nf
, cpu_NF
);
2806 tcg_gen_ext_i32_i64(vf
, cpu_VF
);
2808 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2809 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2812 tcg_gen_movcond_i64(TCG_COND_EQ
, dest
, zf
, zero
,
2816 tcg_gen_movcond_i64(TCG_COND_LT
, dest
, vf
, zero
,
2819 case 2: /* ge: N == V -> N ^ V == 0 */
2820 tmp
= tcg_temp_new_i64();
2821 tcg_gen_xor_i64(tmp
, vf
, nf
);
2822 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2824 tcg_temp_free_i64(tmp
);
2826 case 3: /* gt: !Z && N == V */
2827 tcg_gen_movcond_i64(TCG_COND_NE
, dest
, zf
, zero
,
2829 tmp
= tcg_temp_new_i64();
2830 tcg_gen_xor_i64(tmp
, vf
, nf
);
2831 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2833 tcg_temp_free_i64(tmp
);
2836 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2837 tcg_temp_free_i64(frn
);
2838 tcg_temp_free_i64(frm
);
2839 tcg_temp_free_i64(dest
);
2841 tcg_temp_free_i64(zf
);
2842 tcg_temp_free_i64(nf
);
2843 tcg_temp_free_i64(vf
);
2845 tcg_temp_free_i64(zero
);
2847 TCGv_i32 frn
, frm
, dest
;
2850 zero
= tcg_const_i32(0);
2852 frn
= tcg_temp_new_i32();
2853 frm
= tcg_temp_new_i32();
2854 dest
= tcg_temp_new_i32();
2855 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2856 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2859 tcg_gen_movcond_i32(TCG_COND_EQ
, dest
, cpu_ZF
, zero
,
2863 tcg_gen_movcond_i32(TCG_COND_LT
, dest
, cpu_VF
, zero
,
2866 case 2: /* ge: N == V -> N ^ V == 0 */
2867 tmp
= tcg_temp_new_i32();
2868 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
2869 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
2871 tcg_temp_free_i32(tmp
);
2873 case 3: /* gt: !Z && N == V */
2874 tcg_gen_movcond_i32(TCG_COND_NE
, dest
, cpu_ZF
, zero
,
2876 tmp
= tcg_temp_new_i32();
2877 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
2878 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
2880 tcg_temp_free_i32(tmp
);
2883 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2884 tcg_temp_free_i32(frn
);
2885 tcg_temp_free_i32(frm
);
2886 tcg_temp_free_i32(dest
);
2888 tcg_temp_free_i32(zero
);
2894 static int handle_vminmaxnm(uint32_t insn
, uint32_t rd
, uint32_t rn
,
2895 uint32_t rm
, uint32_t dp
)
2897 uint32_t vmin
= extract32(insn
, 6, 1);
2898 TCGv_ptr fpst
= get_fpstatus_ptr(0);
2901 TCGv_i64 frn
, frm
, dest
;
2903 frn
= tcg_temp_new_i64();
2904 frm
= tcg_temp_new_i64();
2905 dest
= tcg_temp_new_i64();
2907 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2908 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2910 gen_helper_vfp_minnumd(dest
, frn
, frm
, fpst
);
2912 gen_helper_vfp_maxnumd(dest
, frn
, frm
, fpst
);
2914 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2915 tcg_temp_free_i64(frn
);
2916 tcg_temp_free_i64(frm
);
2917 tcg_temp_free_i64(dest
);
2919 TCGv_i32 frn
, frm
, dest
;
2921 frn
= tcg_temp_new_i32();
2922 frm
= tcg_temp_new_i32();
2923 dest
= tcg_temp_new_i32();
2925 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2926 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2928 gen_helper_vfp_minnums(dest
, frn
, frm
, fpst
);
2930 gen_helper_vfp_maxnums(dest
, frn
, frm
, fpst
);
2932 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2933 tcg_temp_free_i32(frn
);
2934 tcg_temp_free_i32(frm
);
2935 tcg_temp_free_i32(dest
);
2938 tcg_temp_free_ptr(fpst
);
2942 static int handle_vrint(uint32_t insn
, uint32_t rd
, uint32_t rm
, uint32_t dp
,
2945 TCGv_ptr fpst
= get_fpstatus_ptr(0);
2948 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rounding
));
2949 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
2954 tcg_op
= tcg_temp_new_i64();
2955 tcg_res
= tcg_temp_new_i64();
2956 tcg_gen_ld_f64(tcg_op
, cpu_env
, vfp_reg_offset(dp
, rm
));
2957 gen_helper_rintd(tcg_res
, tcg_op
, fpst
);
2958 tcg_gen_st_f64(tcg_res
, cpu_env
, vfp_reg_offset(dp
, rd
));
2959 tcg_temp_free_i64(tcg_op
);
2960 tcg_temp_free_i64(tcg_res
);
2964 tcg_op
= tcg_temp_new_i32();
2965 tcg_res
= tcg_temp_new_i32();
2966 tcg_gen_ld_f32(tcg_op
, cpu_env
, vfp_reg_offset(dp
, rm
));
2967 gen_helper_rints(tcg_res
, tcg_op
, fpst
);
2968 tcg_gen_st_f32(tcg_res
, cpu_env
, vfp_reg_offset(dp
, rd
));
2969 tcg_temp_free_i32(tcg_op
);
2970 tcg_temp_free_i32(tcg_res
);
2973 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
2974 tcg_temp_free_i32(tcg_rmode
);
2976 tcg_temp_free_ptr(fpst
);
2980 static int handle_vcvt(uint32_t insn
, uint32_t rd
, uint32_t rm
, uint32_t dp
,
2983 bool is_signed
= extract32(insn
, 7, 1);
2984 TCGv_ptr fpst
= get_fpstatus_ptr(0);
2985 TCGv_i32 tcg_rmode
, tcg_shift
;
2987 tcg_shift
= tcg_const_i32(0);
2989 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rounding
));
2990 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
2993 TCGv_i64 tcg_double
, tcg_res
;
2995 /* Rd is encoded as a single precision register even when the source
2996 * is double precision.
2998 rd
= ((rd
<< 1) & 0x1e) | ((rd
>> 4) & 0x1);
2999 tcg_double
= tcg_temp_new_i64();
3000 tcg_res
= tcg_temp_new_i64();
3001 tcg_tmp
= tcg_temp_new_i32();
3002 tcg_gen_ld_f64(tcg_double
, cpu_env
, vfp_reg_offset(1, rm
));
3004 gen_helper_vfp_tosld(tcg_res
, tcg_double
, tcg_shift
, fpst
);
3006 gen_helper_vfp_tould(tcg_res
, tcg_double
, tcg_shift
, fpst
);
3008 tcg_gen_extrl_i64_i32(tcg_tmp
, tcg_res
);
3009 tcg_gen_st_f32(tcg_tmp
, cpu_env
, vfp_reg_offset(0, rd
));
3010 tcg_temp_free_i32(tcg_tmp
);
3011 tcg_temp_free_i64(tcg_res
);
3012 tcg_temp_free_i64(tcg_double
);
3014 TCGv_i32 tcg_single
, tcg_res
;
3015 tcg_single
= tcg_temp_new_i32();
3016 tcg_res
= tcg_temp_new_i32();
3017 tcg_gen_ld_f32(tcg_single
, cpu_env
, vfp_reg_offset(0, rm
));
3019 gen_helper_vfp_tosls(tcg_res
, tcg_single
, tcg_shift
, fpst
);
3021 gen_helper_vfp_touls(tcg_res
, tcg_single
, tcg_shift
, fpst
);
3023 tcg_gen_st_f32(tcg_res
, cpu_env
, vfp_reg_offset(0, rd
));
3024 tcg_temp_free_i32(tcg_res
);
3025 tcg_temp_free_i32(tcg_single
);
3028 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3029 tcg_temp_free_i32(tcg_rmode
);
3031 tcg_temp_free_i32(tcg_shift
);
3033 tcg_temp_free_ptr(fpst
);
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
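
/* The two-bit RM field at bits [17:16] of the instruction indexes this
 * table, e.g. rounding = fp_decode_rm[extract32(insn, 16, 2)], to obtain
 * the arm_fprounding value passed to the VRINTx/VCVTx handlers below.
 */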
3049 static int disas_vfp_v8_insn(DisasContext
*s
, uint32_t insn
)
3051 uint32_t rd
, rn
, rm
, dp
= extract32(insn
, 8, 1);
3053 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
3058 VFP_DREG_D(rd
, insn
);
3059 VFP_DREG_N(rn
, insn
);
3060 VFP_DREG_M(rm
, insn
);
3062 rd
= VFP_SREG_D(insn
);
3063 rn
= VFP_SREG_N(insn
);
3064 rm
= VFP_SREG_M(insn
);
3067 if ((insn
& 0x0f800e50) == 0x0e000a00) {
3068 return handle_vsel(insn
, rd
, rn
, rm
, dp
);
3069 } else if ((insn
& 0x0fb00e10) == 0x0e800a00) {
3070 return handle_vminmaxnm(insn
, rd
, rn
, rm
, dp
);
3071 } else if ((insn
& 0x0fbc0ed0) == 0x0eb80a40) {
3072 /* VRINTA, VRINTN, VRINTP, VRINTM */
3073 int rounding
= fp_decode_rm
[extract32(insn
, 16, 2)];
3074 return handle_vrint(insn
, rd
, rm
, dp
, rounding
);
3075 } else if ((insn
& 0x0fbc0e50) == 0x0ebc0a40) {
3076 /* VCVTA, VCVTN, VCVTP, VCVTM */
3077 int rounding
= fp_decode_rm
[extract32(insn
, 16, 2)];
3078 return handle_vcvt(insn
, rd
, rm
, dp
, rounding
);
3083 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3084 (ie. an undefined instruction). */
3085 static int disas_vfp_insn(DisasContext
*s
, uint32_t insn
)
3087 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
3093 if (!arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
3097 /* FIXME: this access check should not take precedence over UNDEF
3098 * for invalid encodings; we will generate incorrect syndrome information
3099 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3101 if (s
->fp_excp_el
) {
3102 gen_exception_insn(s
, 4, EXCP_UDEF
,
3103 syn_fp_access_trap(1, 0xe, s
->thumb
), s
->fp_excp_el
);
3107 if (!s
->vfp_enabled
) {
3108 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3109 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
3111 rn
= (insn
>> 16) & 0xf;
3112 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
&& rn
!= ARM_VFP_MVFR2
3113 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
) {
3118 if (extract32(insn
, 28, 4) == 0xf) {
3119 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3120 * only used in v8 and above.
3122 return disas_vfp_v8_insn(s
, insn
);
3125 dp
= ((insn
& 0xf00) == 0xb00);
3126 switch ((insn
>> 24) & 0xf) {
3128 if (insn
& (1 << 4)) {
3129 /* single register transfer */
3130 rd
= (insn
>> 12) & 0xf;
3135 VFP_DREG_N(rn
, insn
);
3138 if (insn
& 0x00c00060
3139 && !arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
3143 pass
= (insn
>> 21) & 1;
3144 if (insn
& (1 << 22)) {
3146 offset
= ((insn
>> 5) & 3) * 8;
3147 } else if (insn
& (1 << 5)) {
3149 offset
= (insn
& (1 << 6)) ? 16 : 0;
3154 if (insn
& ARM_CP_RW_BIT
) {
3156 tmp
= neon_load_reg(rn
, pass
);
3160 tcg_gen_shri_i32(tmp
, tmp
, offset
);
3161 if (insn
& (1 << 23))
3167 if (insn
& (1 << 23)) {
3169 tcg_gen_shri_i32(tmp
, tmp
, 16);
3175 tcg_gen_sari_i32(tmp
, tmp
, 16);
3184 store_reg(s
, rd
, tmp
);
3187 tmp
= load_reg(s
, rd
);
3188 if (insn
& (1 << 23)) {
3191 gen_neon_dup_u8(tmp
, 0);
3192 } else if (size
== 1) {
3193 gen_neon_dup_low16(tmp
);
3195 for (n
= 0; n
<= pass
* 2; n
++) {
3196 tmp2
= tcg_temp_new_i32();
3197 tcg_gen_mov_i32(tmp2
, tmp
);
3198 neon_store_reg(rn
, n
, tmp2
);
3200 neon_store_reg(rn
, n
, tmp
);
3205 tmp2
= neon_load_reg(rn
, pass
);
3206 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
3207 tcg_temp_free_i32(tmp2
);
3210 tmp2
= neon_load_reg(rn
, pass
);
3211 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
3212 tcg_temp_free_i32(tmp2
);
3217 neon_store_reg(rn
, pass
, tmp
);
3221 if ((insn
& 0x6f) != 0x00)
3223 rn
= VFP_SREG_N(insn
);
3224 if (insn
& ARM_CP_RW_BIT
) {
3226 if (insn
& (1 << 21)) {
3227 /* system register */
3232 /* VFP2 allows access to FSID from userspace.
3233 VFP3 restricts all id registers to privileged
3236 && arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3239 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3244 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3246 case ARM_VFP_FPINST
:
3247 case ARM_VFP_FPINST2
:
3248 /* Not present in VFP3. */
3250 || arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3253 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3257 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
3258 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
3260 tmp
= tcg_temp_new_i32();
3261 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
3265 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
3272 || !arm_dc_feature(s
, ARM_FEATURE_MVFR
)) {
3275 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3281 gen_mov_F0_vreg(0, rn
);
3282 tmp
= gen_vfp_mrs();
3285 /* Set the 4 flag bits in the CPSR. */
3287 tcg_temp_free_i32(tmp
);
3289 store_reg(s
, rd
, tmp
);
3293 if (insn
& (1 << 21)) {
3295 /* system register */
3300 /* Writes are ignored. */
3303 tmp
= load_reg(s
, rd
);
3304 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
3305 tcg_temp_free_i32(tmp
);
3311 /* TODO: VFP subarchitecture support.
3312 * For now, keep the EN bit only */
3313 tmp
= load_reg(s
, rd
);
3314 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
3315 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3318 case ARM_VFP_FPINST
:
3319 case ARM_VFP_FPINST2
:
3323 tmp
= load_reg(s
, rd
);
3324 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3330 tmp
= load_reg(s
, rd
);
3332 gen_mov_vreg_F0(0, rn
);
3337 /* data processing */
3338 /* The opcode is in bits 23, 21, 20 and 6. */
3339 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
3343 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
3345 /* rn is register number */
3346 VFP_DREG_N(rn
, insn
);
3349 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18) ||
3350 ((rn
& 0x1e) == 0x6))) {
3351 /* Integer or single/half precision destination. */
3352 rd
= VFP_SREG_D(insn
);
3354 VFP_DREG_D(rd
, insn
);
3357 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14) ||
3358 ((rn
& 0x1e) == 0x4))) {
3359 /* VCVT from int or half precision is always from S reg
3360 * regardless of dp bit. VCVT with immediate frac_bits
3361 * has same format as SREG_M.
3363 rm
= VFP_SREG_M(insn
);
3365 VFP_DREG_M(rm
, insn
);
3368 rn
= VFP_SREG_N(insn
);
3369 if (op
== 15 && rn
== 15) {
3370 /* Double precision destination. */
3371 VFP_DREG_D(rd
, insn
);
3373 rd
= VFP_SREG_D(insn
);
3375 /* NB that we implicitly rely on the encoding for the frac_bits
3376 * in VCVT of fixed to float being the same as that of an SREG_M
3378 rm
= VFP_SREG_M(insn
);
3381 veclen
= s
->vec_len
;
3382 if (op
== 15 && rn
> 3)
3385 /* Shut up compiler warnings. */
3396 /* Figure out what type of vector operation this is. */
3397 if ((rd
& bank_mask
) == 0) {
3402 delta_d
= (s
->vec_stride
>> 1) + 1;
3404 delta_d
= s
->vec_stride
+ 1;
3406 if ((rm
& bank_mask
) == 0) {
3407 /* mixed scalar/vector */
3416 /* Load the initial operands. */
3421 /* Integer source */
3422 gen_mov_F0_vreg(0, rm
);
3427 gen_mov_F0_vreg(dp
, rd
);
3428 gen_mov_F1_vreg(dp
, rm
);
3432 /* Compare with zero */
3433 gen_mov_F0_vreg(dp
, rd
);
3444 /* Source and destination the same. */
3445 gen_mov_F0_vreg(dp
, rd
);
3451 /* VCVTB, VCVTT: only present with the halfprec extension
3452 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3453 * (we choose to UNDEF)
3455 if ((dp
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) ||
3456 !arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
)) {
3459 if (!extract32(rn
, 1, 1)) {
3460 /* Half precision source. */
3461 gen_mov_F0_vreg(0, rm
);
3464 /* Otherwise fall through */
3466 /* One source operand. */
3467 gen_mov_F0_vreg(dp
, rm
);
3471 /* Two source operands. */
3472 gen_mov_F0_vreg(dp
, rn
);
3473 gen_mov_F1_vreg(dp
, rm
);
3477 /* Perform the calculation. */
3479 case 0: /* VMLA: fd + (fn * fm) */
3480 /* Note that order of inputs to the add matters for NaNs */
3482 gen_mov_F0_vreg(dp
, rd
);
3485 case 1: /* VMLS: fd + -(fn * fm) */
3488 gen_mov_F0_vreg(dp
, rd
);
3491 case 2: /* VNMLS: -fd + (fn * fm) */
3492 /* Note that it isn't valid to replace (-A + B) with (B - A)
3493 * or similar plausible looking simplifications
3494 * because this will give wrong results for NaNs.
3497 gen_mov_F0_vreg(dp
, rd
);
3501 case 3: /* VNMLA: -fd + -(fn * fm) */
3504 gen_mov_F0_vreg(dp
, rd
);
3508 case 4: /* mul: fn * fm */
3511 case 5: /* nmul: -(fn * fm) */
3515 case 6: /* add: fn + fm */
3518 case 7: /* sub: fn - fm */
3521 case 8: /* div: fn / fm */
3524 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3525 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3526 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3527 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3528 /* These are fused multiply-add, and must be done as one
3529 * floating point operation with no rounding between the
3530 * multiplication and addition steps.
3531 * NB that doing the negations here as separate steps is
3532 * correct : an input NaN should come out with its sign bit
3533 * flipped if it is a negated-input.
3535 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
3543 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3545 frd
= tcg_temp_new_i64();
3546 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3549 gen_helper_vfp_negd(frd
, frd
);
3551 fpst
= get_fpstatus_ptr(0);
3552 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3553 cpu_F1d
, frd
, fpst
);
3554 tcg_temp_free_ptr(fpst
);
3555 tcg_temp_free_i64(frd
);
3561 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3563 frd
= tcg_temp_new_i32();
3564 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3566 gen_helper_vfp_negs(frd
, frd
);
3568 fpst
= get_fpstatus_ptr(0);
3569 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3570 cpu_F1s
, frd
, fpst
);
3571 tcg_temp_free_ptr(fpst
);
3572 tcg_temp_free_i32(frd
);
3575 case 14: /* fconst */
3576 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3580 n
= (insn
<< 12) & 0x80000000;
3581 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3588 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3595 tcg_gen_movi_i32(cpu_F0s
, n
);
3598 case 15: /* extension space */
3612 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3613 tmp
= gen_vfp_mrs();
3614 tcg_gen_ext16u_i32(tmp
, tmp
);
3616 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3619 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3622 tcg_temp_free_i32(tmp
);
3624 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3625 tmp
= gen_vfp_mrs();
3626 tcg_gen_shri_i32(tmp
, tmp
, 16);
3628 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3631 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3634 tcg_temp_free_i32(tmp
);
3636 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3637 tmp
= tcg_temp_new_i32();
3639 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3642 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3645 gen_mov_F0_vreg(0, rd
);
3646 tmp2
= gen_vfp_mrs();
3647 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3648 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3649 tcg_temp_free_i32(tmp2
);
3652 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3653 tmp
= tcg_temp_new_i32();
3655 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3658 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3661 tcg_gen_shli_i32(tmp
, tmp
, 16);
3662 gen_mov_F0_vreg(0, rd
);
3663 tmp2
= gen_vfp_mrs();
3664 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3665 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3666 tcg_temp_free_i32(tmp2
);
3678 case 11: /* cmpez */
3682 case 12: /* vrintr */
3684 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3686 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3688 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3690 tcg_temp_free_ptr(fpst
);
3693 case 13: /* vrintz */
3695 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3697 tcg_rmode
= tcg_const_i32(float_round_to_zero
);
3698 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3700 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3702 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3704 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3705 tcg_temp_free_i32(tcg_rmode
);
3706 tcg_temp_free_ptr(fpst
);
3709 case 14: /* vrintx */
3711 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3713 gen_helper_rintd_exact(cpu_F0d
, cpu_F0d
, fpst
);
3715 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpst
);
3717 tcg_temp_free_ptr(fpst
);
3720 case 15: /* single<->double conversion */
3722 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3724 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3726 case 16: /* fuito */
3727 gen_vfp_uito(dp
, 0);
3729 case 17: /* fsito */
3730 gen_vfp_sito(dp
, 0);
3732 case 20: /* fshto */
3733 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3736 gen_vfp_shto(dp
, 16 - rm
, 0);
3738 case 21: /* fslto */
3739 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3742 gen_vfp_slto(dp
, 32 - rm
, 0);
3744 case 22: /* fuhto */
3745 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3748 gen_vfp_uhto(dp
, 16 - rm
, 0);
3750 case 23: /* fulto */
3751 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3754 gen_vfp_ulto(dp
, 32 - rm
, 0);
3756 case 24: /* ftoui */
3757 gen_vfp_toui(dp
, 0);
3759 case 25: /* ftouiz */
3760 gen_vfp_touiz(dp
, 0);
3762 case 26: /* ftosi */
3763 gen_vfp_tosi(dp
, 0);
3765 case 27: /* ftosiz */
3766 gen_vfp_tosiz(dp
, 0);
3768 case 28: /* ftosh */
3769 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3772 gen_vfp_tosh(dp
, 16 - rm
, 0);
3774 case 29: /* ftosl */
3775 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3778 gen_vfp_tosl(dp
, 32 - rm
, 0);
3780 case 30: /* ftouh */
3781 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3784 gen_vfp_touh(dp
, 16 - rm
, 0);
3786 case 31: /* ftoul */
3787 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3790 gen_vfp_toul(dp
, 32 - rm
, 0);
3792 default: /* undefined */
3796 default: /* undefined */
3800 /* Write back the result. */
3801 if (op
== 15 && (rn
>= 8 && rn
<= 11)) {
3802 /* Comparison, do nothing. */
3803 } else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18 ||
3804 (rn
& 0x1e) == 0x6)) {
3805 /* VCVT double to int: always integer result.
3806 * VCVT double to half precision is always a single
3809 gen_mov_vreg_F0(0, rd
);
3810 } else if (op
== 15 && rn
== 15) {
3812 gen_mov_vreg_F0(!dp
, rd
);
3814 gen_mov_vreg_F0(dp
, rd
);
3817 /* break out of the loop if we have finished */
3821 if (op
== 15 && delta_m
== 0) {
3822 /* single source one-many */
3824 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3826 gen_mov_vreg_F0(dp
, rd
);
3830 /* Setup the next operands. */
3832 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3836 /* One source operand. */
3837 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3839 gen_mov_F0_vreg(dp
, rm
);
3841 /* Two source operands. */
3842 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3844 gen_mov_F0_vreg(dp
, rn
);
3846 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3848 gen_mov_F1_vreg(dp
, rm
);
3856 if ((insn
& 0x03e00000) == 0x00400000) {
3857 /* two-register transfer */
3858 rn
= (insn
>> 16) & 0xf;
3859 rd
= (insn
>> 12) & 0xf;
3861 VFP_DREG_M(rm
, insn
);
3863 rm
= VFP_SREG_M(insn
);
3866 if (insn
& ARM_CP_RW_BIT
) {
3869 gen_mov_F0_vreg(0, rm
* 2);
3870 tmp
= gen_vfp_mrs();
3871 store_reg(s
, rd
, tmp
);
3872 gen_mov_F0_vreg(0, rm
* 2 + 1);
3873 tmp
= gen_vfp_mrs();
3874 store_reg(s
, rn
, tmp
);
3876 gen_mov_F0_vreg(0, rm
);
3877 tmp
= gen_vfp_mrs();
3878 store_reg(s
, rd
, tmp
);
3879 gen_mov_F0_vreg(0, rm
+ 1);
3880 tmp
= gen_vfp_mrs();
3881 store_reg(s
, rn
, tmp
);
3886 tmp
= load_reg(s
, rd
);
3888 gen_mov_vreg_F0(0, rm
* 2);
3889 tmp
= load_reg(s
, rn
);
3891 gen_mov_vreg_F0(0, rm
* 2 + 1);
3893 tmp
= load_reg(s
, rd
);
3895 gen_mov_vreg_F0(0, rm
);
3896 tmp
= load_reg(s
, rn
);
3898 gen_mov_vreg_F0(0, rm
+ 1);
3903 rn
= (insn
>> 16) & 0xf;
3905 VFP_DREG_D(rd
, insn
);
3907 rd
= VFP_SREG_D(insn
);
3908 if ((insn
& 0x01200000) == 0x01000000) {
3909 /* Single load/store */
3910 offset
= (insn
& 0xff) << 2;
3911 if ((insn
& (1 << 23)) == 0)
3913 if (s
->thumb
&& rn
== 15) {
3914 /* This is actually UNPREDICTABLE */
3915 addr
= tcg_temp_new_i32();
3916 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3918 addr
= load_reg(s
, rn
);
3920 tcg_gen_addi_i32(addr
, addr
, offset
);
3921 if (insn
& (1 << 20)) {
3922 gen_vfp_ld(s
, dp
, addr
);
3923 gen_mov_vreg_F0(dp
, rd
);
3925 gen_mov_F0_vreg(dp
, rd
);
3926 gen_vfp_st(s
, dp
, addr
);
3928 tcg_temp_free_i32(addr
);
3930 /* load/store multiple */
3931 int w
= insn
& (1 << 21);
3933 n
= (insn
>> 1) & 0x7f;
3937 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3938 /* P == U , W == 1 => UNDEF */
3941 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3942 /* UNPREDICTABLE cases for bad immediates: we choose to
3943 * UNDEF to avoid generating huge numbers of TCG ops
3947 if (rn
== 15 && w
) {
3948 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3952 if (s
->thumb
&& rn
== 15) {
3953 /* This is actually UNPREDICTABLE */
3954 addr
= tcg_temp_new_i32();
3955 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3957 addr
= load_reg(s
, rn
);
3959 if (insn
& (1 << 24)) /* pre-decrement */
3960 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3966 for (i
= 0; i
< n
; i
++) {
3967 if (insn
& ARM_CP_RW_BIT
) {
3969 gen_vfp_ld(s
, dp
, addr
);
3970 gen_mov_vreg_F0(dp
, rd
+ i
);
3973 gen_mov_F0_vreg(dp
, rd
+ i
);
3974 gen_vfp_st(s
, dp
, addr
);
3976 tcg_gen_addi_i32(addr
, addr
, offset
);
3980 if (insn
& (1 << 24))
3981 offset
= -offset
* n
;
3982 else if (dp
&& (insn
& 1))
3988 tcg_gen_addi_i32(addr
, addr
, offset
);
3989 store_reg(s
, rn
, addr
);
3991 tcg_temp_free_i32(addr
);
3997 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled || s->ss_active)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
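
/* gen_mulxy picks the top or bottom signed 16-bit half of each operand
 * (x and y select the half, as in SMULBB/SMULBT/SMULTB/SMULTT) and leaves
 * the 32-bit product in t0.
 */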
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
4044 /* Return the mask of PSR bits set by a MSR instruction. */
4045 static uint32_t msr_mask(DisasContext
*s
, int flags
, int spsr
)
4050 if (flags
& (1 << 0))
4052 if (flags
& (1 << 1))
4054 if (flags
& (1 << 2))
4056 if (flags
& (1 << 3))
4059 /* Mask out undefined bits. */
4060 mask
&= ~CPSR_RESERVED
;
4061 if (!arm_dc_feature(s
, ARM_FEATURE_V4T
)) {
4064 if (!arm_dc_feature(s
, ARM_FEATURE_V5
)) {
4065 mask
&= ~CPSR_Q
; /* V5TE in reality*/
4067 if (!arm_dc_feature(s
, ARM_FEATURE_V6
)) {
4068 mask
&= ~(CPSR_E
| CPSR_GE
);
4070 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB2
)) {
4073 /* Mask out execution state and reserved bits. */
4075 mask
&= ~(CPSR_EXEC
| CPSR_RESERVED
);
4077 /* Mask out privileged bits. */
4083 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4084 static int gen_set_psr(DisasContext
*s
, uint32_t mask
, int spsr
, TCGv_i32 t0
)
4088 /* ??? This is also undefined in system mode. */
4092 tmp
= load_cpu_field(spsr
);
4093 tcg_gen_andi_i32(tmp
, tmp
, ~mask
);
4094 tcg_gen_andi_i32(t0
, t0
, mask
);
4095 tcg_gen_or_i32(tmp
, tmp
, t0
);
4096 store_cpu_field(tmp
, spsr
);
4098 gen_set_cpsr(t0
, mask
);
4100 tcg_temp_free_i32(t0
);
4105 /* Returns nonzero if access to the PSR is not permitted. */
4106 static int gen_set_psr_im(DisasContext
*s
, uint32_t mask
, int spsr
, uint32_t val
)
4109 tmp
= tcg_temp_new_i32();
4110 tcg_gen_movi_i32(tmp
, val
);
4111 return gen_set_psr(s
, mask
, spsr
, tmp
);
4114 /* Generate an old-style exception return. Marks pc as dead. */
4115 static void gen_exception_return(DisasContext
*s
, TCGv_i32 pc
)
4118 store_reg(s
, 15, pc
);
4119 tmp
= load_cpu_field(spsr
);
4120 gen_set_cpsr(tmp
, CPSR_ERET_MASK
);
4121 tcg_temp_free_i32(tmp
);
4122 s
->is_jmp
= DISAS_JUMP
;
4125 /* Generate a v6 exception return. Marks both values as dead. */
4126 static void gen_rfe(DisasContext
*s
, TCGv_i32 pc
, TCGv_i32 cpsr
)
4128 gen_set_cpsr(cpsr
, CPSR_ERET_MASK
);
4129 tcg_temp_free_i32(cpsr
);
4130 store_reg(s
, 15, pc
);
4131 s
->is_jmp
= DISAS_JUMP
;
4134 static void gen_nop_hint(DisasContext
*s
, int val
)
4138 gen_set_pc_im(s
, s
->pc
);
4139 s
->is_jmp
= DISAS_YIELD
;
4142 gen_set_pc_im(s
, s
->pc
);
4143 s
->is_jmp
= DISAS_WFI
;
4146 gen_set_pc_im(s
, s
->pc
);
4147 s
->is_jmp
= DISAS_WFE
;
4151 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
4179 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4180 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4181 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4182 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4183 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4185 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4186 switch ((size << 1) | u) { \
4188 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4191 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4194 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4197 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4200 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4203 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4205 default: return 1; \
4208 #define GEN_NEON_INTEGER_OP(name) do { \
4209 switch ((size << 1) | u) { \
4211 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4214 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4217 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4220 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4223 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4226 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4228 default: return 1; \
4231 static TCGv_i32
neon_load_scratch(int scratch
)
4233 TCGv_i32 tmp
= tcg_temp_new_i32();
4234 tcg_gen_ld_i32(tmp
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
4238 static void neon_store_scratch(int scratch
, TCGv_i32 var
)
4240 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
4241 tcg_temp_free_i32(var
);
4244 static inline TCGv_i32
neon_get_scalar(int size
, int reg
)
4248 tmp
= neon_load_reg(reg
& 7, reg
>> 4);
4250 gen_neon_dup_high16(tmp
);
4252 gen_neon_dup_low16(tmp
);
4255 tmp
= neon_load_reg(reg
& 15, reg
>> 4);
4260 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
4263 if (!q
&& size
== 2) {
4266 tmp
= tcg_const_i32(rd
);
4267 tmp2
= tcg_const_i32(rm
);
4271 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
4274 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
4277 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
4285 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
4288 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
4294 tcg_temp_free_i32(tmp
);
4295 tcg_temp_free_i32(tmp2
);
4299 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
4302 if (!q
&& size
== 2) {
4305 tmp
= tcg_const_i32(rd
);
4306 tmp2
= tcg_const_i32(rm
);
4310 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
4313 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
4316 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
4324 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
4327 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
4333 tcg_temp_free_i32(tmp
);
4334 tcg_temp_free_i32(tmp2
);
4338 static void gen_neon_trn_u8(TCGv_i32 t0
, TCGv_i32 t1
)
4342 rd
= tcg_temp_new_i32();
4343 tmp
= tcg_temp_new_i32();
4345 tcg_gen_shli_i32(rd
, t0
, 8);
4346 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
4347 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
4348 tcg_gen_or_i32(rd
, rd
, tmp
);
4350 tcg_gen_shri_i32(t1
, t1
, 8);
4351 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
4352 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
4353 tcg_gen_or_i32(t1
, t1
, tmp
);
4354 tcg_gen_mov_i32(t0
, rd
);
4356 tcg_temp_free_i32(tmp
);
4357 tcg_temp_free_i32(rd
);
4360 static void gen_neon_trn_u16(TCGv_i32 t0
, TCGv_i32 t1
)
4364 rd
= tcg_temp_new_i32();
4365 tmp
= tcg_temp_new_i32();
4367 tcg_gen_shli_i32(rd
, t0
, 16);
4368 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
4369 tcg_gen_or_i32(rd
, rd
, tmp
);
4370 tcg_gen_shri_i32(t1
, t1
, 16);
4371 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
4372 tcg_gen_or_i32(t1
, t1
, tmp
);
4373 tcg_gen_mov_i32(t0
, rd
);
4375 tcg_temp_free_i32(tmp
);
4376 tcg_temp_free_i32(rd
);
4384 } neon_ls_element_type
[11] = {
4398 /* Translate a NEON load/store element instruction. Return nonzero if the
4399 instruction is invalid. */
4400 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
4419 /* FIXME: this access check should not take precedence over UNDEF
4420 * for invalid encodings; we will generate incorrect syndrome information
4421 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4423 if (s
->fp_excp_el
) {
4424 gen_exception_insn(s
, 4, EXCP_UDEF
,
4425 syn_fp_access_trap(1, 0xe, s
->thumb
), s
->fp_excp_el
);
4429 if (!s
->vfp_enabled
)
4431 VFP_DREG_D(rd
, insn
);
4432 rn
= (insn
>> 16) & 0xf;
4434 load
= (insn
& (1 << 21)) != 0;
4435 if ((insn
& (1 << 23)) == 0) {
4436 /* Load store all elements. */
4437 op
= (insn
>> 8) & 0xf;
4438 size
= (insn
>> 6) & 3;
4441 /* Catch UNDEF cases for bad values of align field */
4444 if (((insn
>> 5) & 1) == 1) {
4449 if (((insn
>> 4) & 3) == 3) {
4456 nregs
= neon_ls_element_type
[op
].nregs
;
4457 interleave
= neon_ls_element_type
[op
].interleave
;
4458 spacing
= neon_ls_element_type
[op
].spacing
;
4459 if (size
== 3 && (interleave
| spacing
) != 1)
4461 addr
= tcg_temp_new_i32();
4462 load_reg_var(s
, addr
, rn
);
4463 stride
= (1 << size
) * interleave
;
4464 for (reg
= 0; reg
< nregs
; reg
++) {
4465 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
4466 load_reg_var(s
, addr
, rn
);
4467 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
4468 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
4469 load_reg_var(s
, addr
, rn
);
4470 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4473 tmp64
= tcg_temp_new_i64();
4475 gen_aa32_ld64(tmp64
, addr
, get_mem_index(s
));
4476 neon_store_reg64(tmp64
, rd
);
4478 neon_load_reg64(tmp64
, rd
);
4479 gen_aa32_st64(tmp64
, addr
, get_mem_index(s
));
4481 tcg_temp_free_i64(tmp64
);
4482 tcg_gen_addi_i32(addr
, addr
, stride
);
4484 for (pass
= 0; pass
< 2; pass
++) {
4487 tmp
= tcg_temp_new_i32();
4488 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
4489 neon_store_reg(rd
, pass
, tmp
);
4491 tmp
= neon_load_reg(rd
, pass
);
4492 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
4493 tcg_temp_free_i32(tmp
);
4495 tcg_gen_addi_i32(addr
, addr
, stride
);
4496 } else if (size
== 1) {
4498 tmp
= tcg_temp_new_i32();
4499 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
4500 tcg_gen_addi_i32(addr
, addr
, stride
);
4501 tmp2
= tcg_temp_new_i32();
4502 gen_aa32_ld16u(tmp2
, addr
, get_mem_index(s
));
4503 tcg_gen_addi_i32(addr
, addr
, stride
);
4504 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
4505 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4506 tcg_temp_free_i32(tmp2
);
4507 neon_store_reg(rd
, pass
, tmp
);
4509 tmp
= neon_load_reg(rd
, pass
);
4510 tmp2
= tcg_temp_new_i32();
4511 tcg_gen_shri_i32(tmp2
, tmp
, 16);
4512 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
4513 tcg_temp_free_i32(tmp
);
4514 tcg_gen_addi_i32(addr
, addr
, stride
);
4515 gen_aa32_st16(tmp2
, addr
, get_mem_index(s
));
4516 tcg_temp_free_i32(tmp2
);
4517 tcg_gen_addi_i32(addr
, addr
, stride
);
4519 } else /* size == 0 */ {
4521 TCGV_UNUSED_I32(tmp2
);
4522 for (n
= 0; n
< 4; n
++) {
4523 tmp
= tcg_temp_new_i32();
4524 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
4525 tcg_gen_addi_i32(addr
, addr
, stride
);
4529 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
4530 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
4531 tcg_temp_free_i32(tmp
);
4534 neon_store_reg(rd
, pass
, tmp2
);
4536 tmp2
= neon_load_reg(rd
, pass
);
4537 for (n
= 0; n
< 4; n
++) {
4538 tmp
= tcg_temp_new_i32();
4540 tcg_gen_mov_i32(tmp
, tmp2
);
4542 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
4544 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
4545 tcg_temp_free_i32(tmp
);
4546 tcg_gen_addi_i32(addr
, addr
, stride
);
4548 tcg_temp_free_i32(tmp2
);
4555 tcg_temp_free_i32(addr
);
4558 size
= (insn
>> 10) & 3;
4560 /* Load single element to all lanes. */
4561 int a
= (insn
>> 4) & 1;
4565 size
= (insn
>> 6) & 3;
4566 nregs
= ((insn
>> 8) & 3) + 1;
4569 if (nregs
!= 4 || a
== 0) {
4572 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4575 if (nregs
== 1 && a
== 1 && size
== 0) {
4578 if (nregs
== 3 && a
== 1) {
4581 addr
= tcg_temp_new_i32();
4582 load_reg_var(s
, addr
, rn
);
4584 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4585 tmp
= gen_load_and_replicate(s
, addr
, size
);
4586 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4587 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4588 if (insn
& (1 << 5)) {
4589 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
4590 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
4592 tcg_temp_free_i32(tmp
);
4594 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4595 stride
= (insn
& (1 << 5)) ? 2 : 1;
4596 for (reg
= 0; reg
< nregs
; reg
++) {
4597 tmp
= gen_load_and_replicate(s
, addr
, size
);
4598 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4599 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4600 tcg_temp_free_i32(tmp
);
4601 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4605 tcg_temp_free_i32(addr
);
4606 stride
= (1 << size
) * nregs
;
4608 /* Single element. */
4609 int idx
= (insn
>> 4) & 0xf;
4610 pass
= (insn
>> 7) & 1;
4613 shift
= ((insn
>> 5) & 3) * 8;
4617 shift
= ((insn
>> 6) & 1) * 16;
4618 stride
= (insn
& (1 << 5)) ? 2 : 1;
4622 stride
= (insn
& (1 << 6)) ? 2 : 1;
4627 nregs
= ((insn
>> 8) & 3) + 1;
4628 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4631 if (((idx
& (1 << size
)) != 0) ||
4632 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
4637 if ((idx
& 1) != 0) {
4642 if (size
== 2 && (idx
& 2) != 0) {
4647 if ((size
== 2) && ((idx
& 3) == 3)) {
4654 if ((rd
+ stride
* (nregs
- 1)) > 31) {
4655 /* Attempts to write off the end of the register file
4656 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4657 * the neon_load_reg() would write off the end of the array.
4661 addr
= tcg_temp_new_i32();
4662 load_reg_var(s
, addr
, rn
);
4663 for (reg
= 0; reg
< nregs
; reg
++) {
4665 tmp
= tcg_temp_new_i32();
4668 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
4671 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
4674 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
4676 default: /* Avoid compiler warnings. */
4680 tmp2
= neon_load_reg(rd
, pass
);
4681 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
4682 shift
, size
? 16 : 8);
4683 tcg_temp_free_i32(tmp2
);
4685 neon_store_reg(rd
, pass
, tmp
);
4686 } else { /* Store */
4687 tmp
= neon_load_reg(rd
, pass
);
4689 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4692 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
4695 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
4698 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
4701 tcg_temp_free_i32(tmp
);
4704 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4706 tcg_temp_free_i32(addr
);
4707 stride
= nregs
* (1 << size
);
4713 base
= load_reg(s
, rn
);
4715 tcg_gen_addi_i32(base
, base
, stride
);
4718 index
= load_reg(s
, rm
);
4719 tcg_gen_add_i32(base
, base
, index
);
4720 tcg_temp_free_i32(index
);
4722 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4735 static inline void gen_neon_narrow(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4738 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
4739 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
4740 case 2: tcg_gen_extrl_i64_i32(dest
, src
); break;
4745 static inline void gen_neon_narrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4748 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
4749 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
4750 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
4755 static inline void gen_neon_narrow_satu(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4758 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
4759 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
4760 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
4765 static inline void gen_neon_unarrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4768 case 0: gen_helper_neon_unarrow_sat8(dest
, cpu_env
, src
); break;
4769 case 1: gen_helper_neon_unarrow_sat16(dest
, cpu_env
, src
); break;
4770 case 2: gen_helper_neon_unarrow_sat32(dest
, cpu_env
, src
); break;
4775 static inline void gen_neon_shift_narrow(int size
, TCGv_i32 var
, TCGv_i32 shift
,
4781 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4782 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4787 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4788 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4795 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
4796 case 2: gen_helper_neon_shl_u32(var
, var
, shift
); break;
4801 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4802 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
4809 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv_i32 src
, int size
, int u
)
4813 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
4814 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
4815 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
4820 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
4821 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
4822 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
4826 tcg_temp_free_i32(src
);
4829 static inline void gen_neon_addl(int size
)
4832 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
4833 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
4834 case 2: tcg_gen_add_i64(CPU_V001
); break;
4839 static inline void gen_neon_subl(int size
)
4842 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
4843 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
4844 case 2: tcg_gen_sub_i64(CPU_V001
); break;
4849 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
4852 case 0: gen_helper_neon_negl_u16(var
, var
); break;
4853 case 1: gen_helper_neon_negl_u32(var
, var
); break;
4855 tcg_gen_neg_i64(var
, var
);
4861 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
4864 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
4865 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
4870 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv_i32 a
, TCGv_i32 b
,
4875 switch ((size
<< 1) | u
) {
4876 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
4877 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
4878 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
4879 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
4881 tmp
= gen_muls_i64_i32(a
, b
);
4882 tcg_gen_mov_i64(dest
, tmp
);
4883 tcg_temp_free_i64(tmp
);
4886 tmp
= gen_mulu_i64_i32(a
, b
);
4887 tcg_gen_mov_i64(dest
, tmp
);
4888 tcg_temp_free_i64(tmp
);
4893 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4894 Don't forget to clean them now. */
4896 tcg_temp_free_i32(a
);
4897 tcg_temp_free_i32(b
);
4901 static void gen_neon_narrow_op(int op
, int u
, int size
,
4902 TCGv_i32 dest
, TCGv_i64 src
)
4906 gen_neon_unarrow_sats(size
, dest
, src
);
4908 gen_neon_narrow(size
, dest
, src
);
4912 gen_neon_narrow_satu(size
, dest
, src
);
4914 gen_neon_narrow_sats(size
, dest
, src
);
4919 /* Symbolic constants for op fields for Neon 3-register same-length.
4920 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4923 #define NEON_3R_VHADD 0
4924 #define NEON_3R_VQADD 1
4925 #define NEON_3R_VRHADD 2
4926 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4927 #define NEON_3R_VHSUB 4
4928 #define NEON_3R_VQSUB 5
4929 #define NEON_3R_VCGT 6
4930 #define NEON_3R_VCGE 7
4931 #define NEON_3R_VSHL 8
4932 #define NEON_3R_VQSHL 9
4933 #define NEON_3R_VRSHL 10
4934 #define NEON_3R_VQRSHL 11
4935 #define NEON_3R_VMAX 12
4936 #define NEON_3R_VMIN 13
4937 #define NEON_3R_VABD 14
4938 #define NEON_3R_VABA 15
4939 #define NEON_3R_VADD_VSUB 16
4940 #define NEON_3R_VTST_VCEQ 17
4941 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4942 #define NEON_3R_VMUL 19
4943 #define NEON_3R_VPMAX 20
4944 #define NEON_3R_VPMIN 21
4945 #define NEON_3R_VQDMULH_VQRDMULH 22
4946 #define NEON_3R_VPADD 23
4947 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4948 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4949 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4950 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4951 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4952 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4953 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4954 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
4956 static const uint8_t neon_3r_sizes
[] = {
4957 [NEON_3R_VHADD
] = 0x7,
4958 [NEON_3R_VQADD
] = 0xf,
4959 [NEON_3R_VRHADD
] = 0x7,
4960 [NEON_3R_LOGIC
] = 0xf, /* size field encodes op type */
4961 [NEON_3R_VHSUB
] = 0x7,
4962 [NEON_3R_VQSUB
] = 0xf,
4963 [NEON_3R_VCGT
] = 0x7,
4964 [NEON_3R_VCGE
] = 0x7,
4965 [NEON_3R_VSHL
] = 0xf,
4966 [NEON_3R_VQSHL
] = 0xf,
4967 [NEON_3R_VRSHL
] = 0xf,
4968 [NEON_3R_VQRSHL
] = 0xf,
4969 [NEON_3R_VMAX
] = 0x7,
4970 [NEON_3R_VMIN
] = 0x7,
4971 [NEON_3R_VABD
] = 0x7,
4972 [NEON_3R_VABA
] = 0x7,
4973 [NEON_3R_VADD_VSUB
] = 0xf,
4974 [NEON_3R_VTST_VCEQ
] = 0x7,
4975 [NEON_3R_VML
] = 0x7,
4976 [NEON_3R_VMUL
] = 0x7,
4977 [NEON_3R_VPMAX
] = 0x7,
4978 [NEON_3R_VPMIN
] = 0x7,
4979 [NEON_3R_VQDMULH_VQRDMULH
] = 0x6,
4980 [NEON_3R_VPADD
] = 0x7,
4981 [NEON_3R_SHA
] = 0xf, /* size field encodes op type */
4982 [NEON_3R_VFM
] = 0x5, /* size bit 1 encodes op */
4983 [NEON_3R_FLOAT_ARITH
] = 0x5, /* size bit 1 encodes op */
4984 [NEON_3R_FLOAT_MULTIPLY
] = 0x5, /* size bit 1 encodes op */
4985 [NEON_3R_FLOAT_CMP
] = 0x5, /* size bit 1 encodes op */
4986 [NEON_3R_FLOAT_ACMP
] = 0x5, /* size bit 1 encodes op */
4987 [NEON_3R_FLOAT_MINMAX
] = 0x5, /* size bit 1 encodes op */
4988 [NEON_3R_FLOAT_MISC
] = 0x5, /* size bit 1 encodes op */
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}
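/* Usage sketch (this mirrors the per-pass loop further down in
 * disas_neon_data_insn(); shown here only as an illustration):
 *
 *     if (neon_2rm_is_float_op(op)) {
 *         tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
 *     } else {
 *         tmp = neon_load_reg(rm, pass);
 *     }
 *
 * i.e. float-to-float 2-reg-misc ops operate on the scalar VFP temporary
 * rather than on an integer TCG temp.
 */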
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
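/* Minimal sketch, mirroring the two-register-miscellaneous decode that
 * appears later in disas_neon_data_insn(): op is assembled from insn bits
 * [17:16,10:7], size from bits [19:18], and unallocated or wrongly sized
 * ops UNDEF because their bitmap entry has the bit clear.  This helper is
 * illustrative only and not used by the translator.
 */
static inline bool neon_2rm_size_allowed_sketch(uint32_t insn)
{
    int op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
    int size = (insn >> 18) & 3;

    return (neon_2rm_sizes[op] & (1 << size)) != 0;
}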
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */
static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5182 /* All insns of this form UNDEF for either this condition or the
5183 * superset of cases "Q==1"; we catch the latter later.
5185 if (q
&& ((rd
| rn
| rm
) & 1)) {
5189 * The SHA-1/SHA-256 3-register instructions require special treatment
5190 * here, as their size field is overloaded as an op type selector, and
5191 * they all consume their input in a single pass.
5193 if (op
== NEON_3R_SHA
) {
5197 if (!u
) { /* SHA-1 */
5198 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
5201 tmp
= tcg_const_i32(rd
);
5202 tmp2
= tcg_const_i32(rn
);
5203 tmp3
= tcg_const_i32(rm
);
5204 tmp4
= tcg_const_i32(size
);
5205 gen_helper_crypto_sha1_3reg(cpu_env
, tmp
, tmp2
, tmp3
, tmp4
);
5206 tcg_temp_free_i32(tmp4
);
5207 } else { /* SHA-256 */
5208 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
) || size
== 3) {
5211 tmp
= tcg_const_i32(rd
);
5212 tmp2
= tcg_const_i32(rn
);
5213 tmp3
= tcg_const_i32(rm
);
5216 gen_helper_crypto_sha256h(cpu_env
, tmp
, tmp2
, tmp3
);
5219 gen_helper_crypto_sha256h2(cpu_env
, tmp
, tmp2
, tmp3
);
5222 gen_helper_crypto_sha256su1(cpu_env
, tmp
, tmp2
, tmp3
);
5226 tcg_temp_free_i32(tmp
);
5227 tcg_temp_free_i32(tmp2
);
5228 tcg_temp_free_i32(tmp3
);
5231 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
5232 /* 64-bit element instructions. */
5233 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5234 neon_load_reg64(cpu_V0
, rn
+ pass
);
5235 neon_load_reg64(cpu_V1
, rm
+ pass
);
5239 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
5242 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
5248 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
5251 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
5257 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5259 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5264 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5267 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5273 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5275 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5278 case NEON_3R_VQRSHL
:
5280 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5283 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5287 case NEON_3R_VADD_VSUB
:
5289 tcg_gen_sub_i64(CPU_V001
);
5291 tcg_gen_add_i64(CPU_V001
);
5297 neon_store_reg64(cpu_V0
, rd
+ pass
);
5306 case NEON_3R_VQRSHL
:
5309 /* Shift instruction operands are reversed. */
5324 case NEON_3R_FLOAT_ARITH
:
5325 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5327 case NEON_3R_FLOAT_MINMAX
:
5328 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5330 case NEON_3R_FLOAT_CMP
:
5332 /* no encoding for U=0 C=1x */
5336 case NEON_3R_FLOAT_ACMP
:
5341 case NEON_3R_FLOAT_MISC
:
5342 /* VMAXNM/VMINNM in ARMv8 */
5343 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5348 if (u
&& (size
!= 0)) {
5349 /* UNDEF on invalid size for polynomial subcase */
5354 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
) || u
) {
5362 if (pairwise
&& q
) {
5363 /* All the pairwise insns UNDEF if Q is set */
5367 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5372 tmp
= neon_load_reg(rn
, 0);
5373 tmp2
= neon_load_reg(rn
, 1);
5375 tmp
= neon_load_reg(rm
, 0);
5376 tmp2
= neon_load_reg(rm
, 1);
5380 tmp
= neon_load_reg(rn
, pass
);
5381 tmp2
= neon_load_reg(rm
, pass
);
5385 GEN_NEON_INTEGER_OP(hadd
);
5388 GEN_NEON_INTEGER_OP_ENV(qadd
);
5390 case NEON_3R_VRHADD
:
5391 GEN_NEON_INTEGER_OP(rhadd
);
5393 case NEON_3R_LOGIC
: /* Logic ops. */
5394 switch ((u
<< 2) | size
) {
5396 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
5399 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
5402 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5405 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
5408 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
5411 tmp3
= neon_load_reg(rd
, pass
);
5412 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
5413 tcg_temp_free_i32(tmp3
);
5416 tmp3
= neon_load_reg(rd
, pass
);
5417 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
5418 tcg_temp_free_i32(tmp3
);
5421 tmp3
= neon_load_reg(rd
, pass
);
5422 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
5423 tcg_temp_free_i32(tmp3
);
5428 GEN_NEON_INTEGER_OP(hsub
);
5431 GEN_NEON_INTEGER_OP_ENV(qsub
);
5434 GEN_NEON_INTEGER_OP(cgt
);
5437 GEN_NEON_INTEGER_OP(cge
);
5440 GEN_NEON_INTEGER_OP(shl
);
5443 GEN_NEON_INTEGER_OP_ENV(qshl
);
5446 GEN_NEON_INTEGER_OP(rshl
);
5448 case NEON_3R_VQRSHL
:
5449 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5452 GEN_NEON_INTEGER_OP(max
);
5455 GEN_NEON_INTEGER_OP(min
);
5458 GEN_NEON_INTEGER_OP(abd
);
5461 GEN_NEON_INTEGER_OP(abd
);
5462 tcg_temp_free_i32(tmp2
);
5463 tmp2
= neon_load_reg(rd
, pass
);
5464 gen_neon_add(size
, tmp
, tmp2
);
5466 case NEON_3R_VADD_VSUB
:
5467 if (!u
) { /* VADD */
5468 gen_neon_add(size
, tmp
, tmp2
);
5471 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
5472 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
5473 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
5478 case NEON_3R_VTST_VCEQ
:
5479 if (!u
) { /* VTST */
5481 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
5482 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
5483 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
5488 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5489 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5490 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5497 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5498 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5499 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5502 tcg_temp_free_i32(tmp2
);
5503 tmp2
= neon_load_reg(rd
, pass
);
5505 gen_neon_rsb(size
, tmp
, tmp2
);
5507 gen_neon_add(size
, tmp
, tmp2
);
5511 if (u
) { /* polynomial */
5512 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5513 } else { /* Integer */
5515 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5516 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5517 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5523 GEN_NEON_INTEGER_OP(pmax
);
5526 GEN_NEON_INTEGER_OP(pmin
);
5528 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5529 if (!u
) { /* VQDMULH */
5532 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5535 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5539 } else { /* VQRDMULH */
5542 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5545 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5553 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5554 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5555 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5559 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5561 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5562 switch ((u
<< 2) | size
) {
5565 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5568 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5571 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5576 tcg_temp_free_ptr(fpstatus
);
5579 case NEON_3R_FLOAT_MULTIPLY
:
5581 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5582 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5584 tcg_temp_free_i32(tmp2
);
5585 tmp2
= neon_load_reg(rd
, pass
);
5587 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5589 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5592 tcg_temp_free_ptr(fpstatus
);
5595 case NEON_3R_FLOAT_CMP
:
5597 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5599 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5602 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5604 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5607 tcg_temp_free_ptr(fpstatus
);
5610 case NEON_3R_FLOAT_ACMP
:
5612 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5614 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5616 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5618 tcg_temp_free_ptr(fpstatus
);
5621 case NEON_3R_FLOAT_MINMAX
:
5623 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5625 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5627 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5629 tcg_temp_free_ptr(fpstatus
);
5632 case NEON_3R_FLOAT_MISC
:
5635 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5637 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5639 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5641 tcg_temp_free_ptr(fpstatus
);
5644 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5646 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5652 /* VFMA, VFMS: fused multiply-add */
5653 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5654 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5657 gen_helper_vfp_negs(tmp
, tmp
);
5659 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5660 tcg_temp_free_i32(tmp3
);
5661 tcg_temp_free_ptr(fpstatus
);
5667 tcg_temp_free_i32(tmp2
);
5669 /* Save the result. For elementwise operations we can put it
5670 straight into the destination register. For pairwise operations
5671 we have to be careful to avoid clobbering the source operands. */
5672 if (pairwise
&& rd
== rm
) {
5673 neon_store_scratch(pass
, tmp
);
5675 neon_store_reg(rd
, pass
, tmp
);
5679 if (pairwise
&& rd
== rm
) {
5680 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5681 tmp
= neon_load_scratch(pass
);
5682 neon_store_reg(rd
, pass
, tmp
);
5685 /* End of 3 register same size operations. */
5686 } else if (insn
& (1 << 4)) {
5687 if ((insn
& 0x00380080) != 0) {
5688 /* Two registers and shift. */
5689 op
= (insn
>> 8) & 0xf;
5690 if (insn
& (1 << 7)) {
5698 while ((insn
& (1 << (size
+ 19))) == 0)
5701 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5702 /* To avoid excessive duplication of ops we implement shift
5703 by immediate using the variable shift operations. */
5705 /* Shift by immediate:
5706 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5707 if (q
&& ((rd
| rm
) & 1)) {
5710 if (!u
&& (op
== 4 || op
== 6)) {
5713 /* Right shifts are encoded as N - shift, where N is the
5714 element size in bits. */
5716 shift
= shift
- (1 << (size
+ 3));
5724 imm
= (uint8_t) shift
;
5729 imm
= (uint16_t) shift
;
5740 for (pass
= 0; pass
< count
; pass
++) {
5742 neon_load_reg64(cpu_V0
, rm
+ pass
);
5743 tcg_gen_movi_i64(cpu_V1
, imm
);
5748 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5750 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5755 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5757 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5760 case 5: /* VSHL, VSLI */
5761 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5763 case 6: /* VQSHLU */
5764 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5769 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5772 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5777 if (op
== 1 || op
== 3) {
5779 neon_load_reg64(cpu_V1
, rd
+ pass
);
5780 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5781 } else if (op
== 4 || (op
== 5 && u
)) {
5783 neon_load_reg64(cpu_V1
, rd
+ pass
);
5785 if (shift
< -63 || shift
> 63) {
5789 mask
= 0xffffffffffffffffull
>> -shift
;
5791 mask
= 0xffffffffffffffffull
<< shift
;
5794 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5795 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5797 neon_store_reg64(cpu_V0
, rd
+ pass
);
5798 } else { /* size < 3 */
5799 /* Operands in T0 and T1. */
5800 tmp
= neon_load_reg(rm
, pass
);
5801 tmp2
= tcg_temp_new_i32();
5802 tcg_gen_movi_i32(tmp2
, imm
);
5806 GEN_NEON_INTEGER_OP(shl
);
5810 GEN_NEON_INTEGER_OP(rshl
);
5813 case 5: /* VSHL, VSLI */
5815 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5816 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5817 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5821 case 6: /* VQSHLU */
5824 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5828 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5832 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5840 GEN_NEON_INTEGER_OP_ENV(qshl
);
5843 tcg_temp_free_i32(tmp2
);
5845 if (op
== 1 || op
== 3) {
5847 tmp2
= neon_load_reg(rd
, pass
);
5848 gen_neon_add(size
, tmp
, tmp2
);
5849 tcg_temp_free_i32(tmp2
);
5850 } else if (op
== 4 || (op
== 5 && u
)) {
5855 mask
= 0xff >> -shift
;
5857 mask
= (uint8_t)(0xff << shift
);
5863 mask
= 0xffff >> -shift
;
5865 mask
= (uint16_t)(0xffff << shift
);
5869 if (shift
< -31 || shift
> 31) {
5873 mask
= 0xffffffffu
>> -shift
;
5875 mask
= 0xffffffffu
<< shift
;
5881 tmp2
= neon_load_reg(rd
, pass
);
5882 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5883 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5884 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5885 tcg_temp_free_i32(tmp2
);
5887 neon_store_reg(rd
, pass
, tmp
);
5890 } else if (op
< 10) {
5891 /* Shift by immediate and narrow:
5892 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5893 int input_unsigned
= (op
== 8) ? !u
: u
;
5897 shift
= shift
- (1 << (size
+ 3));
5900 tmp64
= tcg_const_i64(shift
);
5901 neon_load_reg64(cpu_V0
, rm
);
5902 neon_load_reg64(cpu_V1
, rm
+ 1);
5903 for (pass
= 0; pass
< 2; pass
++) {
5911 if (input_unsigned
) {
5912 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5914 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5917 if (input_unsigned
) {
5918 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5920 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5923 tmp
= tcg_temp_new_i32();
5924 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5925 neon_store_reg(rd
, pass
, tmp
);
5927 tcg_temp_free_i64(tmp64
);
5930 imm
= (uint16_t)shift
;
5934 imm
= (uint32_t)shift
;
5936 tmp2
= tcg_const_i32(imm
);
5937 tmp4
= neon_load_reg(rm
+ 1, 0);
5938 tmp5
= neon_load_reg(rm
+ 1, 1);
5939 for (pass
= 0; pass
< 2; pass
++) {
5941 tmp
= neon_load_reg(rm
, 0);
5945 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5948 tmp3
= neon_load_reg(rm
, 1);
5952 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5954 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5955 tcg_temp_free_i32(tmp
);
5956 tcg_temp_free_i32(tmp3
);
5957 tmp
= tcg_temp_new_i32();
5958 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5959 neon_store_reg(rd
, pass
, tmp
);
5961 tcg_temp_free_i32(tmp2
);
5963 } else if (op
== 10) {
5965 if (q
|| (rd
& 1)) {
5968 tmp
= neon_load_reg(rm
, 0);
5969 tmp2
= neon_load_reg(rm
, 1);
5970 for (pass
= 0; pass
< 2; pass
++) {
5974 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
/* The shift is less than the width of the source
   type, so we can just shift the whole register.  */
tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
/* Widen the result of shift: we need to clear
 * the potential overflow bits resulting from
 * left bits of the narrow input appearing as
 * right bits of the left neighbour narrow
 * input.  */
5985 if (size
< 2 || !u
) {
5988 imm
= (0xffu
>> (8 - shift
));
5990 } else if (size
== 1) {
5991 imm
= 0xffff >> (16 - shift
);
5994 imm
= 0xffffffff >> (32 - shift
);
5997 imm64
= imm
| (((uint64_t)imm
) << 32);
6001 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
6004 neon_store_reg64(cpu_V0
, rd
+ pass
);
6006 } else if (op
>= 14) {
6007 /* VCVT fixed-point. */
6008 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
6011 /* We have already masked out the must-be-1 top bit of imm6,
6012 * hence this 32-shift where the ARM ARM has 64-imm6.
6015 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6016 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
6019 gen_vfp_ulto(0, shift
, 1);
6021 gen_vfp_slto(0, shift
, 1);
6024 gen_vfp_toul(0, shift
, 1);
6026 gen_vfp_tosl(0, shift
, 1);
6028 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
6033 } else { /* (insn & 0x00380080) == 0 */
6035 if (q
&& (rd
& 1)) {
6039 op
= (insn
>> 8) & 0xf;
6040 /* One register and immediate. */
6041 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
6042 invert
= (insn
& (1 << 5)) != 0;
6043 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6044 * We choose to not special-case this and will behave as if a
6045 * valid constant encoding of 0 had been given.
6064 imm
= (imm
<< 8) | (imm
<< 24);
6067 imm
= (imm
<< 8) | 0xff;
6070 imm
= (imm
<< 16) | 0xffff;
6073 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
6081 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
6082 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
6088 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6089 if (op
& 1 && op
< 12) {
6090 tmp
= neon_load_reg(rd
, pass
);
6092 /* The immediate value has already been inverted, so
6094 tcg_gen_andi_i32(tmp
, tmp
, imm
);
6096 tcg_gen_ori_i32(tmp
, tmp
, imm
);
6100 tmp
= tcg_temp_new_i32();
6101 if (op
== 14 && invert
) {
6105 for (n
= 0; n
< 4; n
++) {
6106 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
6107 val
|= 0xff << (n
* 8);
6109 tcg_gen_movi_i32(tmp
, val
);
6111 tcg_gen_movi_i32(tmp
, imm
);
6114 neon_store_reg(rd
, pass
, tmp
);
6117 } else { /* (insn & 0x00800010 == 0x00800000) */
6119 op
= (insn
>> 8) & 0xf;
6120 if ((insn
& (1 << 6)) == 0) {
6121 /* Three registers of different lengths. */
/* undefreq: bit 0 : UNDEF if size == 0
 *           bit 1 : UNDEF if size == 1
 *           bit 2 : UNDEF if size == 2
 *           bit 3 : UNDEF if U == 1
 * Note that [2:0] set implies 'always UNDEF'
 */
/* prewiden, src1_wide, src2_wide, undefreq */
static const int neon_3reg_wide[16][4] = {
    {1, 0, 0, 0}, /* VADDL */
    {1, 1, 0, 0}, /* VADDW */
    {1, 0, 0, 0}, /* VSUBL */
    {1, 1, 0, 0}, /* VSUBW */
    {0, 1, 1, 0}, /* VADDHN */
    {0, 0, 0, 0}, /* VABAL */
    {0, 1, 1, 0}, /* VSUBHN */
    {0, 0, 0, 0}, /* VABDL */
    {0, 0, 0, 0}, /* VMLAL */
    {0, 0, 0, 9}, /* VQDMLAL */
    {0, 0, 0, 0}, /* VMLSL */
    {0, 0, 0, 9}, /* VQDMLSL */
    {0, 0, 0, 0}, /* Integer VMULL */
    {0, 0, 0, 1}, /* VQDMULL */
    {0, 0, 0, 0xa}, /* Polynomial VMULL */
    {0, 0, 0, 7}, /* Reserved: always UNDEF */
};
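/* Reading of the table above (this restates the check performed a few
 * lines below): an entry makes the encoding UNDEF when
 * (undefreq & (1 << size)) is set, or when bit 3 is set and U == 1.
 */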
6152 prewiden
= neon_3reg_wide
[op
][0];
6153 src1_wide
= neon_3reg_wide
[op
][1];
6154 src2_wide
= neon_3reg_wide
[op
][2];
6155 undefreq
= neon_3reg_wide
[op
][3];
6157 if ((undefreq
& (1 << size
)) ||
6158 ((undefreq
& 8) && u
)) {
6161 if ((src1_wide
&& (rn
& 1)) ||
6162 (src2_wide
&& (rm
& 1)) ||
6163 (!src2_wide
&& (rd
& 1))) {
6167 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6168 * outside the loop below as it only performs a single pass.
6170 if (op
== 14 && size
== 2) {
6171 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
6173 if (!arm_dc_feature(s
, ARM_FEATURE_V8_PMULL
)) {
6176 tcg_rn
= tcg_temp_new_i64();
6177 tcg_rm
= tcg_temp_new_i64();
6178 tcg_rd
= tcg_temp_new_i64();
6179 neon_load_reg64(tcg_rn
, rn
);
6180 neon_load_reg64(tcg_rm
, rm
);
6181 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
6182 neon_store_reg64(tcg_rd
, rd
);
6183 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
6184 neon_store_reg64(tcg_rd
, rd
+ 1);
6185 tcg_temp_free_i64(tcg_rn
);
6186 tcg_temp_free_i64(tcg_rm
);
6187 tcg_temp_free_i64(tcg_rd
);
6191 /* Avoid overlapping operands. Wide source operands are
6192 always aligned so will never overlap with wide
6193 destinations in problematic ways. */
6194 if (rd
== rm
&& !src2_wide
) {
6195 tmp
= neon_load_reg(rm
, 1);
6196 neon_store_scratch(2, tmp
);
6197 } else if (rd
== rn
&& !src1_wide
) {
6198 tmp
= neon_load_reg(rn
, 1);
6199 neon_store_scratch(2, tmp
);
6201 TCGV_UNUSED_I32(tmp3
);
6202 for (pass
= 0; pass
< 2; pass
++) {
6204 neon_load_reg64(cpu_V0
, rn
+ pass
);
6205 TCGV_UNUSED_I32(tmp
);
6207 if (pass
== 1 && rd
== rn
) {
6208 tmp
= neon_load_scratch(2);
6210 tmp
= neon_load_reg(rn
, pass
);
6213 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6217 neon_load_reg64(cpu_V1
, rm
+ pass
);
6218 TCGV_UNUSED_I32(tmp2
);
6220 if (pass
== 1 && rd
== rm
) {
6221 tmp2
= neon_load_scratch(2);
6223 tmp2
= neon_load_reg(rm
, pass
);
6226 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
6230 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6231 gen_neon_addl(size
);
6233 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6234 gen_neon_subl(size
);
6236 case 5: case 7: /* VABAL, VABDL */
6237 switch ((size
<< 1) | u
) {
6239 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
6242 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
6245 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
6248 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
6251 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
6254 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
6258 tcg_temp_free_i32(tmp2
);
6259 tcg_temp_free_i32(tmp
);
6261 case 8: case 9: case 10: case 11: case 12: case 13:
6262 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6263 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6265 case 14: /* Polynomial VMULL */
6266 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
6267 tcg_temp_free_i32(tmp2
);
6268 tcg_temp_free_i32(tmp
);
6270 default: /* 15 is RESERVED: caught earlier */
6275 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6276 neon_store_reg64(cpu_V0
, rd
+ pass
);
6277 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
6279 neon_load_reg64(cpu_V1
, rd
+ pass
);
6281 case 10: /* VMLSL */
6282 gen_neon_negl(cpu_V0
, size
);
6284 case 5: case 8: /* VABAL, VMLAL */
6285 gen_neon_addl(size
);
6287 case 9: case 11: /* VQDMLAL, VQDMLSL */
6288 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6290 gen_neon_negl(cpu_V0
, size
);
6292 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6297 neon_store_reg64(cpu_V0
, rd
+ pass
);
6298 } else if (op
== 4 || op
== 6) {
6299 /* Narrowing operation. */
6300 tmp
= tcg_temp_new_i32();
6304 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6307 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6310 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6311 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6318 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6321 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6324 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6325 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6326 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6334 neon_store_reg(rd
, 0, tmp3
);
6335 neon_store_reg(rd
, 1, tmp
);
6338 /* Write back the result. */
6339 neon_store_reg64(cpu_V0
, rd
+ pass
);
6343 /* Two registers and a scalar. NB that for ops of this form
6344 * the ARM ARM labels bit 24 as Q, but it is in our variable
case 1: /* Float VMLA scalar */
case 5: /* Float VMLS scalar */
case 9: /* Float VMUL scalar */
6358 case 0: /* Integer VMLA scalar */
6359 case 4: /* Integer VMLS scalar */
6360 case 8: /* Integer VMUL scalar */
6361 case 12: /* VQDMULH scalar */
6362 case 13: /* VQRDMULH scalar */
6363 if (u
&& ((rd
| rn
) & 1)) {
6366 tmp
= neon_get_scalar(size
, rm
);
6367 neon_store_scratch(0, tmp
);
6368 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6369 tmp
= neon_load_scratch(0);
6370 tmp2
= neon_load_reg(rn
, pass
);
6373 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6375 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6377 } else if (op
== 13) {
6379 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6381 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6383 } else if (op
& 1) {
6384 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6385 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6386 tcg_temp_free_ptr(fpstatus
);
6389 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6390 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6391 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6395 tcg_temp_free_i32(tmp2
);
6398 tmp2
= neon_load_reg(rd
, pass
);
6401 gen_neon_add(size
, tmp
, tmp2
);
6405 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6406 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6407 tcg_temp_free_ptr(fpstatus
);
6411 gen_neon_rsb(size
, tmp
, tmp2
);
6415 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6416 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6417 tcg_temp_free_ptr(fpstatus
);
6423 tcg_temp_free_i32(tmp2
);
6425 neon_store_reg(rd
, pass
, tmp
);
6428 case 3: /* VQDMLAL scalar */
6429 case 7: /* VQDMLSL scalar */
6430 case 11: /* VQDMULL scalar */
case 2: /* VMLAL scalar */
6436 case 6: /* VMLSL scalar */
6437 case 10: /* VMULL scalar */
6441 tmp2
= neon_get_scalar(size
, rm
);
6442 /* We need a copy of tmp2 because gen_neon_mull
6443 * deletes it during pass 0. */
6444 tmp4
= tcg_temp_new_i32();
6445 tcg_gen_mov_i32(tmp4
, tmp2
);
6446 tmp3
= neon_load_reg(rn
, 1);
6448 for (pass
= 0; pass
< 2; pass
++) {
6450 tmp
= neon_load_reg(rn
, 0);
6455 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6457 neon_load_reg64(cpu_V1
, rd
+ pass
);
6461 gen_neon_negl(cpu_V0
, size
);
6464 gen_neon_addl(size
);
6467 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6469 gen_neon_negl(cpu_V0
, size
);
6471 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6477 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6482 neon_store_reg64(cpu_V0
, rd
+ pass
);
6487 default: /* 14 and 15 are RESERVED */
6491 } else { /* size == 3 */
6494 imm
= (insn
>> 8) & 0xf;
6499 if (q
&& ((rd
| rn
| rm
) & 1)) {
6504 neon_load_reg64(cpu_V0
, rn
);
6506 neon_load_reg64(cpu_V1
, rn
+ 1);
6508 } else if (imm
== 8) {
6509 neon_load_reg64(cpu_V0
, rn
+ 1);
6511 neon_load_reg64(cpu_V1
, rm
);
6514 tmp64
= tcg_temp_new_i64();
6516 neon_load_reg64(cpu_V0
, rn
);
6517 neon_load_reg64(tmp64
, rn
+ 1);
6519 neon_load_reg64(cpu_V0
, rn
+ 1);
6520 neon_load_reg64(tmp64
, rm
);
6522 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6523 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6524 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6526 neon_load_reg64(cpu_V1
, rm
);
6528 neon_load_reg64(cpu_V1
, rm
+ 1);
6531 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6532 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6533 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6534 tcg_temp_free_i64(tmp64
);
6537 neon_load_reg64(cpu_V0
, rn
);
6538 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6539 neon_load_reg64(cpu_V1
, rm
);
6540 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6541 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6543 neon_store_reg64(cpu_V0
, rd
);
6545 neon_store_reg64(cpu_V1
, rd
+ 1);
6547 } else if ((insn
& (1 << 11)) == 0) {
6548 /* Two register misc. */
6549 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6550 size
= (insn
>> 18) & 3;
6551 /* UNDEF for unknown op values and bad op-size combinations */
6552 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6555 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6556 q
&& ((rm
| rd
) & 1)) {
6560 case NEON_2RM_VREV64
:
6561 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6562 tmp
= neon_load_reg(rm
, pass
* 2);
6563 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6565 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6566 case 1: gen_swap_half(tmp
); break;
6567 case 2: /* no-op */ break;
6570 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6572 neon_store_reg(rd
, pass
* 2, tmp2
);
6575 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6576 case 1: gen_swap_half(tmp2
); break;
6579 neon_store_reg(rd
, pass
* 2, tmp2
);
6583 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6584 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6585 for (pass
= 0; pass
< q
+ 1; pass
++) {
6586 tmp
= neon_load_reg(rm
, pass
* 2);
6587 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6588 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6589 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6591 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6592 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6593 case 2: tcg_gen_add_i64(CPU_V001
); break;
6596 if (op
>= NEON_2RM_VPADAL
) {
6598 neon_load_reg64(cpu_V1
, rd
+ pass
);
6599 gen_neon_addl(size
);
6601 neon_store_reg64(cpu_V0
, rd
+ pass
);
6607 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6608 tmp
= neon_load_reg(rm
, n
);
6609 tmp2
= neon_load_reg(rd
, n
+ 1);
6610 neon_store_reg(rm
, n
, tmp2
);
6611 neon_store_reg(rd
, n
+ 1, tmp
);
6618 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6623 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6627 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6628 /* also VQMOVUN; op field and mnemonics don't line up */
6632 TCGV_UNUSED_I32(tmp2
);
6633 for (pass
= 0; pass
< 2; pass
++) {
6634 neon_load_reg64(cpu_V0
, rm
+ pass
);
6635 tmp
= tcg_temp_new_i32();
6636 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6641 neon_store_reg(rd
, 0, tmp2
);
6642 neon_store_reg(rd
, 1, tmp
);
6646 case NEON_2RM_VSHLL
:
6647 if (q
|| (rd
& 1)) {
6650 tmp
= neon_load_reg(rm
, 0);
6651 tmp2
= neon_load_reg(rm
, 1);
6652 for (pass
= 0; pass
< 2; pass
++) {
6655 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6656 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6657 neon_store_reg64(cpu_V0
, rd
+ pass
);
6660 case NEON_2RM_VCVT_F16_F32
:
6661 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
6665 tmp
= tcg_temp_new_i32();
6666 tmp2
= tcg_temp_new_i32();
6667 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
6668 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6669 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
6670 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6671 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6672 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6673 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
6674 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6675 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
6676 neon_store_reg(rd
, 0, tmp2
);
6677 tmp2
= tcg_temp_new_i32();
6678 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6679 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6680 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6681 neon_store_reg(rd
, 1, tmp2
);
6682 tcg_temp_free_i32(tmp
);
6684 case NEON_2RM_VCVT_F32_F16
:
6685 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
6689 tmp3
= tcg_temp_new_i32();
6690 tmp
= neon_load_reg(rm
, 0);
6691 tmp2
= neon_load_reg(rm
, 1);
6692 tcg_gen_ext16u_i32(tmp3
, tmp
);
6693 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6694 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
6695 tcg_gen_shri_i32(tmp3
, tmp
, 16);
6696 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6697 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
6698 tcg_temp_free_i32(tmp
);
6699 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6700 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6701 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
6702 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
6703 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6704 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
6705 tcg_temp_free_i32(tmp2
);
6706 tcg_temp_free_i32(tmp3
);
6708 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6709 if (!arm_dc_feature(s
, ARM_FEATURE_V8_AES
)
6710 || ((rm
| rd
) & 1)) {
6713 tmp
= tcg_const_i32(rd
);
6714 tmp2
= tcg_const_i32(rm
);
6716 /* Bit 6 is the lowest opcode bit; it distinguishes between
6717 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6719 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6721 if (op
== NEON_2RM_AESE
) {
6722 gen_helper_crypto_aese(cpu_env
, tmp
, tmp2
, tmp3
);
6724 gen_helper_crypto_aesmc(cpu_env
, tmp
, tmp2
, tmp3
);
6726 tcg_temp_free_i32(tmp
);
6727 tcg_temp_free_i32(tmp2
);
6728 tcg_temp_free_i32(tmp3
);
6730 case NEON_2RM_SHA1H
:
6731 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)
6732 || ((rm
| rd
) & 1)) {
6735 tmp
= tcg_const_i32(rd
);
6736 tmp2
= tcg_const_i32(rm
);
6738 gen_helper_crypto_sha1h(cpu_env
, tmp
, tmp2
);
6740 tcg_temp_free_i32(tmp
);
6741 tcg_temp_free_i32(tmp2
);
6743 case NEON_2RM_SHA1SU1
:
6744 if ((rm
| rd
) & 1) {
6747 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6749 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
)) {
6752 } else if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
6755 tmp
= tcg_const_i32(rd
);
6756 tmp2
= tcg_const_i32(rm
);
6758 gen_helper_crypto_sha256su0(cpu_env
, tmp
, tmp2
);
6760 gen_helper_crypto_sha1su1(cpu_env
, tmp
, tmp2
);
6762 tcg_temp_free_i32(tmp
);
6763 tcg_temp_free_i32(tmp2
);
6767 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6768 if (neon_2rm_is_float_op(op
)) {
6769 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
6770 neon_reg_offset(rm
, pass
));
6771 TCGV_UNUSED_I32(tmp
);
6773 tmp
= neon_load_reg(rm
, pass
);
6776 case NEON_2RM_VREV32
:
6778 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6779 case 1: gen_swap_half(tmp
); break;
6783 case NEON_2RM_VREV16
:
6788 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6789 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6790 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6796 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6797 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6798 case 2: gen_helper_clz(tmp
, tmp
); break;
6803 gen_helper_neon_cnt_u8(tmp
, tmp
);
6806 tcg_gen_not_i32(tmp
, tmp
);
6808 case NEON_2RM_VQABS
:
6811 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6814 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6817 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6822 case NEON_2RM_VQNEG
:
6825 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6828 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6831 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6836 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6837 tmp2
= tcg_const_i32(0);
6839 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6840 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6841 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6844 tcg_temp_free_i32(tmp2
);
6845 if (op
== NEON_2RM_VCLE0
) {
6846 tcg_gen_not_i32(tmp
, tmp
);
6849 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6850 tmp2
= tcg_const_i32(0);
6852 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6853 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6854 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6857 tcg_temp_free_i32(tmp2
);
6858 if (op
== NEON_2RM_VCLT0
) {
6859 tcg_gen_not_i32(tmp
, tmp
);
6862 case NEON_2RM_VCEQ0
:
6863 tmp2
= tcg_const_i32(0);
6865 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6866 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6867 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6870 tcg_temp_free_i32(tmp2
);
6874 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
6875 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
6876 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
6881 tmp2
= tcg_const_i32(0);
6882 gen_neon_rsb(size
, tmp
, tmp2
);
6883 tcg_temp_free_i32(tmp2
);
6885 case NEON_2RM_VCGT0_F
:
6887 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6888 tmp2
= tcg_const_i32(0);
6889 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6890 tcg_temp_free_i32(tmp2
);
6891 tcg_temp_free_ptr(fpstatus
);
6894 case NEON_2RM_VCGE0_F
:
6896 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6897 tmp2
= tcg_const_i32(0);
6898 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6899 tcg_temp_free_i32(tmp2
);
6900 tcg_temp_free_ptr(fpstatus
);
6903 case NEON_2RM_VCEQ0_F
:
6905 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6906 tmp2
= tcg_const_i32(0);
6907 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6908 tcg_temp_free_i32(tmp2
);
6909 tcg_temp_free_ptr(fpstatus
);
6912 case NEON_2RM_VCLE0_F
:
6914 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6915 tmp2
= tcg_const_i32(0);
6916 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6917 tcg_temp_free_i32(tmp2
);
6918 tcg_temp_free_ptr(fpstatus
);
6921 case NEON_2RM_VCLT0_F
:
6923 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6924 tmp2
= tcg_const_i32(0);
6925 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6926 tcg_temp_free_i32(tmp2
);
6927 tcg_temp_free_ptr(fpstatus
);
6930 case NEON_2RM_VABS_F
:
6933 case NEON_2RM_VNEG_F
:
6937 tmp2
= neon_load_reg(rd
, pass
);
6938 neon_store_reg(rm
, pass
, tmp2
);
6941 tmp2
= neon_load_reg(rd
, pass
);
6943 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6944 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6947 neon_store_reg(rm
, pass
, tmp2
);
6949 case NEON_2RM_VRINTN
:
6950 case NEON_2RM_VRINTA
:
6951 case NEON_2RM_VRINTM
:
6952 case NEON_2RM_VRINTP
:
6953 case NEON_2RM_VRINTZ
:
6956 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6959 if (op
== NEON_2RM_VRINTZ
) {
6960 rmode
= FPROUNDING_ZERO
;
6962 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
6965 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6966 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6968 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
6969 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6971 tcg_temp_free_ptr(fpstatus
);
6972 tcg_temp_free_i32(tcg_rmode
);
6975 case NEON_2RM_VRINTX
:
6977 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6978 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
6979 tcg_temp_free_ptr(fpstatus
);
6982 case NEON_2RM_VCVTAU
:
6983 case NEON_2RM_VCVTAS
:
6984 case NEON_2RM_VCVTNU
:
6985 case NEON_2RM_VCVTNS
:
6986 case NEON_2RM_VCVTPU
:
6987 case NEON_2RM_VCVTPS
:
6988 case NEON_2RM_VCVTMU
:
6989 case NEON_2RM_VCVTMS
:
6991 bool is_signed
= !extract32(insn
, 7, 1);
6992 TCGv_ptr fpst
= get_fpstatus_ptr(1);
6993 TCGv_i32 tcg_rmode
, tcg_shift
;
6994 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
6996 tcg_shift
= tcg_const_i32(0);
6997 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6998 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7002 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
7005 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
7009 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7011 tcg_temp_free_i32(tcg_rmode
);
7012 tcg_temp_free_i32(tcg_shift
);
7013 tcg_temp_free_ptr(fpst
);
7016 case NEON_2RM_VRECPE
:
7018 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7019 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
7020 tcg_temp_free_ptr(fpstatus
);
7023 case NEON_2RM_VRSQRTE
:
7025 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7026 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
7027 tcg_temp_free_ptr(fpstatus
);
7030 case NEON_2RM_VRECPE_F
:
7032 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7033 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7034 tcg_temp_free_ptr(fpstatus
);
7037 case NEON_2RM_VRSQRTE_F
:
7039 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7040 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7041 tcg_temp_free_ptr(fpstatus
);
7044 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
7047 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
7050 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
7051 gen_vfp_tosiz(0, 1);
7053 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
7054 gen_vfp_touiz(0, 1);
7057 /* Reserved op values were caught by the
7058 * neon_2rm_sizes[] check earlier.
7062 if (neon_2rm_is_float_op(op
)) {
7063 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
7064 neon_reg_offset(rd
, pass
));
7066 neon_store_reg(rd
, pass
, tmp
);
7071 } else if ((insn
& (1 << 10)) == 0) {
7073 int n
= ((insn
>> 8) & 3) + 1;
7074 if ((rn
+ n
) > 32) {
7075 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7076 * helper function running off the end of the register file.
7081 if (insn
& (1 << 6)) {
7082 tmp
= neon_load_reg(rd
, 0);
7084 tmp
= tcg_temp_new_i32();
7085 tcg_gen_movi_i32(tmp
, 0);
7087 tmp2
= neon_load_reg(rm
, 0);
7088 tmp4
= tcg_const_i32(rn
);
7089 tmp5
= tcg_const_i32(n
);
7090 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
7091 tcg_temp_free_i32(tmp
);
7092 if (insn
& (1 << 6)) {
7093 tmp
= neon_load_reg(rd
, 1);
7095 tmp
= tcg_temp_new_i32();
7096 tcg_gen_movi_i32(tmp
, 0);
7098 tmp3
= neon_load_reg(rm
, 1);
7099 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
7100 tcg_temp_free_i32(tmp5
);
7101 tcg_temp_free_i32(tmp4
);
7102 neon_store_reg(rd
, 0, tmp2
);
7103 neon_store_reg(rd
, 1, tmp3
);
7104 tcg_temp_free_i32(tmp
);
7105 } else if ((insn
& 0x380) == 0) {
7107 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
7110 if (insn
& (1 << 19)) {
7111 tmp
= neon_load_reg(rm
, 1);
7113 tmp
= neon_load_reg(rm
, 0);
7115 if (insn
& (1 << 16)) {
7116 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
7117 } else if (insn
& (1 << 17)) {
7118 if ((insn
>> 18) & 1)
7119 gen_neon_dup_high16(tmp
);
7121 gen_neon_dup_low16(tmp
);
7123 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7124 tmp2
= tcg_temp_new_i32();
7125 tcg_gen_mov_i32(tmp2
, tmp
);
7126 neon_store_reg(rd
, pass
, tmp2
);
7128 tcg_temp_free_i32(tmp
);
7137 static int disas_coproc_insn(DisasContext
*s
, uint32_t insn
)
7139 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
7140 const ARMCPRegInfo
*ri
;
7142 cpnum
= (insn
>> 8) & 0xf;
7144 /* First check for coprocessor space used for XScale/iwMMXt insns */
7145 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && (cpnum
< 2)) {
7146 if (extract32(s
->c15_cpar
, cpnum
, 1) == 0) {
7149 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7150 return disas_iwmmxt_insn(s
, insn
);
7151 } else if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
7152 return disas_dsp_insn(s
, insn
);
7157 /* Otherwise treat as a generic register access */
7158 is64
= (insn
& (1 << 25)) == 0;
7159 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
7167 opc1
= (insn
>> 4) & 0xf;
7169 rt2
= (insn
>> 16) & 0xf;
7171 crn
= (insn
>> 16) & 0xf;
7172 opc1
= (insn
>> 21) & 7;
7173 opc2
= (insn
>> 5) & 7;
7176 isread
= (insn
>> 20) & 1;
7177 rt
= (insn
>> 12) & 0xf;
7179 ri
= get_arm_cp_reginfo(s
->cp_regs
,
7180 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
7182 /* Check access permissions */
7183 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7188 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7189 /* Emit code to perform further access permissions checks at
7190 * runtime; this may result in an exception.
7191 * Note that on XScale all cp0..c13 registers do an access check
7192 * call in order to handle c15_cpar.
7198 /* Note that since we are an implementation which takes an
7199 * exception on a trapped conditional instruction only if the
7200 * instruction passes its condition code check, we can take
7201 * advantage of the clause in the ARM ARM that allows us to set
7202 * the COND field in the instruction to 0xE in all cases.
7203 * We could fish the actual condition out of the insn (ARM)
7204 * or the condexec bits (Thumb) but it isn't necessary.
7209 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7212 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7213 rt
, isread
, s
->thumb
);
7218 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7221 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7222 rt
, isread
, s
->thumb
);
7226 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7227 * so this can only happen if this is an ARMv7 or earlier CPU,
7228 * in which case the syndrome information won't actually be
7231 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7232 syndrome
= syn_uncategorized();
7236 gen_set_condexec(s
);
7237 gen_set_pc_im(s
, s
->pc
- 4);
7238 tmpptr
= tcg_const_ptr(ri
);
7239 tcg_syn
= tcg_const_i32(syndrome
);
7240 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
);
7241 tcg_temp_free_ptr(tmpptr
);
7242 tcg_temp_free_i32(tcg_syn
);
7245 /* Handle special cases first */
7246 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7253 gen_set_pc_im(s
, s
->pc
);
7254 s
->is_jmp
= DISAS_WFI
;
7260 if ((s
->tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7269 if (ri
->type
& ARM_CP_CONST
) {
7270 tmp64
= tcg_const_i64(ri
->resetvalue
);
7271 } else if (ri
->readfn
) {
7273 tmp64
= tcg_temp_new_i64();
7274 tmpptr
= tcg_const_ptr(ri
);
7275 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7276 tcg_temp_free_ptr(tmpptr
);
7278 tmp64
= tcg_temp_new_i64();
7279 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7281 tmp
= tcg_temp_new_i32();
7282 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7283 store_reg(s
, rt
, tmp
);
7284 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7285 tmp
= tcg_temp_new_i32();
7286 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7287 tcg_temp_free_i64(tmp64
);
7288 store_reg(s
, rt2
, tmp
);
7291 if (ri
->type
& ARM_CP_CONST
) {
7292 tmp
= tcg_const_i32(ri
->resetvalue
);
7293 } else if (ri
->readfn
) {
7295 tmp
= tcg_temp_new_i32();
7296 tmpptr
= tcg_const_ptr(ri
);
7297 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7298 tcg_temp_free_ptr(tmpptr
);
7300 tmp
= load_cpu_offset(ri
->fieldoffset
);
7303 /* Destination register of r15 for 32 bit loads sets
7304 * the condition codes from the high 4 bits of the value
7307 tcg_temp_free_i32(tmp
);
7309 store_reg(s
, rt
, tmp
);
7314 if (ri
->type
& ARM_CP_CONST
) {
7315 /* If not forbidden by access permissions, treat as WI */
7320 TCGv_i32 tmplo
, tmphi
;
7321 TCGv_i64 tmp64
= tcg_temp_new_i64();
7322 tmplo
= load_reg(s
, rt
);
7323 tmphi
= load_reg(s
, rt2
);
7324 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7325 tcg_temp_free_i32(tmplo
);
7326 tcg_temp_free_i32(tmphi
);
7328 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7329 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7330 tcg_temp_free_ptr(tmpptr
);
7332 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7334 tcg_temp_free_i64(tmp64
);
7339 tmp
= load_reg(s
, rt
);
7340 tmpptr
= tcg_const_ptr(ri
);
7341 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
7342 tcg_temp_free_ptr(tmpptr
);
7343 tcg_temp_free_i32(tmp
);
7345 TCGv_i32 tmp
= load_reg(s
, rt
);
7346 store_cpu_offset(tmp
, ri
->fieldoffset
);
7351 if ((s
->tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7352 /* I/O operations must end the TB here (whether read or write) */
7355 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
7356 /* We default to ending the TB on a coprocessor register write,
7357 * but allow this to be suppressed by the register definition
7358 * (usually only necessary to work around guest bugs).
7366 /* Unknown register; this might be a guest error or a QEMU
7367 * unimplemented feature.
7370 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7371 "64 bit system register cp:%d opc1: %d crm:%d "
7373 isread
? "read" : "write", cpnum
, opc1
, crm
,
7374 s
->ns
? "non-secure" : "secure");
7376 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7377 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7379 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
7380 s
->ns
? "non-secure" : "secure");
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
7456 /* emit alignment check if needed */
7458 /* NB: all LDREX variants (incl. thumb) occupy 4 bytes */
7459 gen_alignment_check(s
, 4, (target_ulong
)1 << size
, addr
);
7464 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
7467 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
7471 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
7478 TCGv_i32 tmp2
= tcg_temp_new_i32();
7479 TCGv_i32 tmp3
= tcg_temp_new_i32();
7481 tcg_gen_addi_i32(tmp2
, addr
, 4);
7482 gen_aa32_ld32u(tmp3
, tmp2
, get_mem_index(s
));
7483 tcg_temp_free_i32(tmp2
);
7484 tcg_gen_concat_i32_i64(cpu_exclusive_val
, tmp
, tmp3
);
7485 store_reg(s
, rt2
, tmp3
);
7487 tcg_gen_extu_i32_i64(cpu_exclusive_val
, tmp
);
7490 store_reg(s
, rt
, tmp
);
7491 tcg_gen_extu_i32_i64(cpu_exclusive_addr
, addr
);
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
7523 fail_label
= gen_new_label();
7524 done_label
= gen_new_label();
7525 extaddr
= tcg_temp_new_i64();
7526 tcg_gen_extu_i32_i64(extaddr
, addr
);
7527 tcg_gen_brcond_i64(TCG_COND_NE
, extaddr
, cpu_exclusive_addr
, fail_label
);
7528 tcg_temp_free_i64(extaddr
);
7530 tmp
= tcg_temp_new_i32();
7533 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
7536 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
7540 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
7546 val64
= tcg_temp_new_i64();
7548 TCGv_i32 tmp2
= tcg_temp_new_i32();
7549 TCGv_i32 tmp3
= tcg_temp_new_i32();
7550 tcg_gen_addi_i32(tmp2
, addr
, 4);
7551 gen_aa32_ld32u(tmp3
, tmp2
, get_mem_index(s
));
7552 tcg_temp_free_i32(tmp2
);
7553 tcg_gen_concat_i32_i64(val64
, tmp
, tmp3
);
7554 tcg_temp_free_i32(tmp3
);
7556 tcg_gen_extu_i32_i64(val64
, tmp
);
7558 tcg_temp_free_i32(tmp
);
7560 tcg_gen_brcond_i64(TCG_COND_NE
, val64
, cpu_exclusive_val
, fail_label
);
7561 tcg_temp_free_i64(val64
);
7563 tmp
= load_reg(s
, rt
);
7566 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
7569 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
7573 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
7578 tcg_temp_free_i32(tmp
);
7580 tcg_gen_addi_i32(addr
, addr
, 4);
7581 tmp
= load_reg(s
, rt2
);
7582 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
7583 tcg_temp_free_i32(tmp
);
7585 tcg_gen_movi_i32(cpu_R
[rd
], 0);
7586 tcg_gen_br(done_label
);
7587 gen_set_label(fail_label
);
7588 tcg_gen_movi_i32(cpu_R
[rd
], 1);
7589 gen_set_label(done_label
);
7590 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7597 * @mode: mode field from insn (which stack to store to)
7598 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7599 * @writeback: true if writeback bit set
7601 * Generate code for the SRS (Store Return State) insn.
7603 static void gen_srs(DisasContext
*s
,
7604 uint32_t mode
, uint32_t amode
, bool writeback
)
7607 TCGv_i32 addr
= tcg_temp_new_i32();
7608 TCGv_i32 tmp
= tcg_const_i32(mode
);
7609 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7610 tcg_temp_free_i32(tmp
);
7627 tcg_gen_addi_i32(addr
, addr
, offset
);
7628 tmp
= load_reg(s
, 14);
7629 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
7630 tcg_temp_free_i32(tmp
);
7631 tmp
= load_cpu_field(spsr
);
7632 tcg_gen_addi_i32(addr
, addr
, 4);
7633 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
7634 tcg_temp_free_i32(tmp
);
7652 tcg_gen_addi_i32(addr
, addr
, offset
);
7653 tmp
= tcg_const_i32(mode
);
7654 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7655 tcg_temp_free_i32(tmp
);
7657 tcg_temp_free_i32(addr
);
7660 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
7662 unsigned int cond
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
7669 /* M variants do not implement ARM mode. */
7670 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
7675 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7676 * choose to UNDEF. In ARMv5 and above the space is used
7677 * for miscellaneous unconditional instructions.
7681 /* Unconditional instructions. */
7682 if (((insn
>> 25) & 7) == 1) {
7683 /* NEON Data processing. */
7684 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7688 if (disas_neon_data_insn(s
, insn
)) {
7693 if ((insn
& 0x0f100000) == 0x04000000) {
7694 /* NEON load/store. */
7695 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7699 if (disas_neon_ls_insn(s
, insn
)) {
7704 if ((insn
& 0x0f000e10) == 0x0e000a00) {
7706 if (disas_vfp_insn(s
, insn
)) {
7711 if (((insn
& 0x0f30f000) == 0x0510f000) ||
7712 ((insn
& 0x0f30f010) == 0x0710f000)) {
7713 if ((insn
& (1 << 22)) == 0) {
7715 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7719 /* Otherwise PLD; v5TE+ */
7723 if (((insn
& 0x0f70f000) == 0x0450f000) ||
7724 ((insn
& 0x0f70f010) == 0x0650f000)) {
7726 return; /* PLI; V7 */
7728 if (((insn
& 0x0f700000) == 0x04100000) ||
7729 ((insn
& 0x0f700010) == 0x06100000)) {
7730 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7733 return; /* v7MP: Unallocated memory hint: must NOP */
7736 if ((insn
& 0x0ffffdff) == 0x01010000) {
7739 if (((insn
>> 9) & 1) != s
->bswap_code
) {
7740 /* Dynamic endianness switching not implemented. */
7741 qemu_log_mask(LOG_UNIMP
, "arm: unimplemented setend\n");
7745 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
7746 switch ((insn
>> 4) & 0xf) {
7754 /* We don't emulate caches so these are a no-op. */
7757 /* We need to break the TB after this insn to execute
7758 * self-modifying code correctly and also to take
7759 * any pending interrupts immediately.
7766 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
7772 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
7774 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
7780 rn
= (insn
>> 16) & 0xf;
7781 addr
= load_reg(s
, rn
);
7782 i
= (insn
>> 23) & 3;
7784 case 0: offset
= -4; break; /* DA */
7785 case 1: offset
= 0; break; /* IA */
7786 case 2: offset
= -8; break; /* DB */
7787 case 3: offset
= 4; break; /* IB */
7791 tcg_gen_addi_i32(addr
, addr
, offset
);
7792 /* Load PC into tmp and CPSR into tmp2. */
7793 tmp
= tcg_temp_new_i32();
7794 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
7795 tcg_gen_addi_i32(addr
, addr
, 4);
7796 tmp2
= tcg_temp_new_i32();
7797 gen_aa32_ld32u(tmp2
, addr
, get_mem_index(s
));
7798 if (insn
& (1 << 21)) {
7799 /* Base writeback. */
7801 case 0: offset
= -8; break;
7802 case 1: offset
= 4; break;
7803 case 2: offset
= -4; break;
7804 case 3: offset
= 0; break;
7808 tcg_gen_addi_i32(addr
, addr
, offset
);
7809 store_reg(s
, rn
, addr
);
7811 tcg_temp_free_i32(addr
);
7813 gen_rfe(s
, tmp
, tmp2
);
7815 } else if ((insn
& 0x0e000000) == 0x0a000000) {
7816 /* branch link and change to thumb (blx <offset>) */
7819 val
= (uint32_t)s
->pc
;
7820 tmp
= tcg_temp_new_i32();
7821 tcg_gen_movi_i32(tmp
, val
);
7822 store_reg(s
, 14, tmp
);
7823 /* Sign-extend the 24-bit offset */
7824 offset
= (((int32_t)insn
) << 8) >> 8;
7825 /* offset * 4 + bit24 * 2 + (thumb bit) */
7826 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
7827 /* pipeline offset */
7829 /* protected by ARCH(5); above, near the start of uncond block */
7832 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
7833 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7834 /* iWMMXt register transfer. */
7835 if (extract32(s
->c15_cpar
, 1, 1)) {
7836 if (!disas_iwmmxt_insn(s
, insn
)) {
7841 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
7842 /* Coprocessor double register transfer. */
7844 } else if ((insn
& 0x0f000010) == 0x0e000010) {
7845 /* Additional coprocessor register transfer. */
7846 } else if ((insn
& 0x0ff10020) == 0x01000000) {
7849 /* cps (privileged) */
7853 if (insn
& (1 << 19)) {
7854 if (insn
& (1 << 8))
7856 if (insn
& (1 << 7))
7858 if (insn
& (1 << 6))
7860 if (insn
& (1 << 18))
7863 if (insn
& (1 << 17)) {
7865 val
|= (insn
& 0x1f);
7868 gen_set_psr_im(s
, mask
, 0, val
);
7875 /* if not always execute, we generate a conditional jump to
7877 s
->condlabel
= gen_new_label();
7878 arm_gen_test_cc(cond
^ 1, s
->condlabel
);
7881 if ((insn
& 0x0f900000) == 0x03000000) {
7882 if ((insn
& (1 << 21)) == 0) {
7884 rd
= (insn
>> 12) & 0xf;
7885 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
7886 if ((insn
& (1 << 22)) == 0) {
7888 tmp
= tcg_temp_new_i32();
7889 tcg_gen_movi_i32(tmp
, val
);
7892 tmp
= load_reg(s
, rd
);
7893 tcg_gen_ext16u_i32(tmp
, tmp
);
7894 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
7896 store_reg(s
, rd
, tmp
);
7898 if (((insn
>> 12) & 0xf) != 0xf)
7900 if (((insn
>> 16) & 0xf) == 0) {
7901 gen_nop_hint(s
, insn
& 0xff);
7903 /* CPSR = immediate */
7905 shift
= ((insn
>> 8) & 0xf) * 2;
7907 val
= (val
>> shift
) | (val
<< (32 - shift
));
7908 i
= ((insn
& (1 << 22)) != 0);
7909 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
7915 } else if ((insn
& 0x0f900000) == 0x01000000
7916 && (insn
& 0x00000090) != 0x00000090) {
7917 /* miscellaneous instructions */
7918 op1
= (insn
>> 21) & 3;
7919 sh
= (insn
>> 4) & 0xf;
7922 case 0x0: /* move program status register */
7925 tmp
= load_reg(s
, rm
);
7926 i
= ((op1
& 2) != 0);
7927 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
7931 rd
= (insn
>> 12) & 0xf;
7935 tmp
= load_cpu_field(spsr
);
7937 tmp
= tcg_temp_new_i32();
7938 gen_helper_cpsr_read(tmp
, cpu_env
);
7940 store_reg(s
, rd
, tmp
);
7945 /* branch/exchange thumb (bx). */
7947 tmp
= load_reg(s
, rm
);
7949 } else if (op1
== 3) {
7952 rd
= (insn
>> 12) & 0xf;
7953 tmp
= load_reg(s
, rm
);
7954 gen_helper_clz(tmp
, tmp
);
7955 store_reg(s
, rd
, tmp
);
7963 /* Trivial implementation equivalent to bx. */
7964 tmp
= load_reg(s
, rm
);
7975 /* branch link/exchange thumb (blx) */
7976 tmp
= load_reg(s
, rm
);
7977 tmp2
= tcg_temp_new_i32();
7978 tcg_gen_movi_i32(tmp2
, s
->pc
);
7979 store_reg(s
, 14, tmp2
);
7985 uint32_t c
= extract32(insn
, 8, 4);
7987 /* Check this CPU supports ARMv8 CRC instructions.
7988 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7989 * Bits 8, 10 and 11 should be zero.
7991 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
) || op1
== 0x3 ||
7996 rn
= extract32(insn
, 16, 4);
7997 rd
= extract32(insn
, 12, 4);
7999 tmp
= load_reg(s
, rn
);
8000 tmp2
= load_reg(s
, rm
);
8002 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
8003 } else if (op1
== 1) {
8004 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
8006 tmp3
= tcg_const_i32(1 << op1
);
8008 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
8010 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
8012 tcg_temp_free_i32(tmp2
);
8013 tcg_temp_free_i32(tmp3
);
8014 store_reg(s
, rd
, tmp
);
8017 case 0x5: /* saturating add/subtract */
8019 rd
= (insn
>> 12) & 0xf;
8020 rn
= (insn
>> 16) & 0xf;
8021 tmp
= load_reg(s
, rm
);
8022 tmp2
= load_reg(s
, rn
);
8024 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
8026 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8028 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8029 tcg_temp_free_i32(tmp2
);
8030 store_reg(s
, rd
, tmp
);
8034 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
8039 gen_exception_insn(s
, 4, EXCP_BKPT
,
8040 syn_aa32_bkpt(imm16
, false),
8041 default_exception_el(s
));
8044 /* Hypervisor call (v7) */
8052 /* Secure monitor call (v6+) */
8064 case 0x8: /* signed multiply */
8069 rs
= (insn
>> 8) & 0xf;
8070 rn
= (insn
>> 12) & 0xf;
8071 rd
= (insn
>> 16) & 0xf;
8073 /* (32 * 16) >> 16 */
8074 tmp
= load_reg(s
, rm
);
8075 tmp2
= load_reg(s
, rs
);
8077 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8080 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8081 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8082 tmp
= tcg_temp_new_i32();
8083 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8084 tcg_temp_free_i64(tmp64
);
8085 if ((sh
& 2) == 0) {
8086 tmp2
= load_reg(s
, rn
);
8087 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8088 tcg_temp_free_i32(tmp2
);
8090 store_reg(s
, rd
, tmp
);
8093 tmp
= load_reg(s
, rm
);
8094 tmp2
= load_reg(s
, rs
);
8095 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
8096 tcg_temp_free_i32(tmp2
);
8098 tmp64
= tcg_temp_new_i64();
8099 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8100 tcg_temp_free_i32(tmp
);
8101 gen_addq(s
, tmp64
, rn
, rd
);
8102 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8103 tcg_temp_free_i64(tmp64
);
8106 tmp2
= load_reg(s
, rn
);
8107 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8108 tcg_temp_free_i32(tmp2
);
8110 store_reg(s
, rd
, tmp
);
8117 } else if (((insn
& 0x0e000000) == 0 &&
8118 (insn
& 0x00000090) != 0x90) ||
8119 ((insn
& 0x0e000000) == (1 << 25))) {
8120 int set_cc
, logic_cc
, shiftop
;
8122 op1
= (insn
>> 21) & 0xf;
8123 set_cc
= (insn
>> 20) & 1;
8124 logic_cc
= table_logic_cc
[op1
] & set_cc
;
8126 /* data processing instruction */
8127 if (insn
& (1 << 25)) {
8128 /* immediate operand */
8130 shift
= ((insn
>> 8) & 0xf) * 2;
8132 val
= (val
>> shift
) | (val
<< (32 - shift
));
8134 tmp2
= tcg_temp_new_i32();
8135 tcg_gen_movi_i32(tmp2
, val
);
8136 if (logic_cc
&& shift
) {
8137 gen_set_CF_bit31(tmp2
);
8142 tmp2
= load_reg(s
, rm
);
8143 shiftop
= (insn
>> 5) & 3;
8144 if (!(insn
& (1 << 4))) {
8145 shift
= (insn
>> 7) & 0x1f;
8146 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8148 rs
= (insn
>> 8) & 0xf;
8149 tmp
= load_reg(s
, rs
);
8150 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
8153 if (op1
!= 0x0f && op1
!= 0x0d) {
8154 rn
= (insn
>> 16) & 0xf;
8155 tmp
= load_reg(s
, rn
);
8157 TCGV_UNUSED_I32(tmp
);
8159 rd
= (insn
>> 12) & 0xf;
8162 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8166 store_reg_bx(s
, rd
, tmp
);
8169 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8173 store_reg_bx(s
, rd
, tmp
);
8176 if (set_cc
&& rd
== 15) {
8177 /* SUBS r15, ... is used for exception return. */
8181 gen_sub_CC(tmp
, tmp
, tmp2
);
8182 gen_exception_return(s
, tmp
);
8185 gen_sub_CC(tmp
, tmp
, tmp2
);
8187 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8189 store_reg_bx(s
, rd
, tmp
);
8194 gen_sub_CC(tmp
, tmp2
, tmp
);
8196 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8198 store_reg_bx(s
, rd
, tmp
);
8202 gen_add_CC(tmp
, tmp
, tmp2
);
8204 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8206 store_reg_bx(s
, rd
, tmp
);
8210 gen_adc_CC(tmp
, tmp
, tmp2
);
8212 gen_add_carry(tmp
, tmp
, tmp2
);
8214 store_reg_bx(s
, rd
, tmp
);
8218 gen_sbc_CC(tmp
, tmp
, tmp2
);
8220 gen_sub_carry(tmp
, tmp
, tmp2
);
8222 store_reg_bx(s
, rd
, tmp
);
8226 gen_sbc_CC(tmp
, tmp2
, tmp
);
8228 gen_sub_carry(tmp
, tmp2
, tmp
);
8230 store_reg_bx(s
, rd
, tmp
);
8234 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8237 tcg_temp_free_i32(tmp
);
8241 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8244 tcg_temp_free_i32(tmp
);
8248 gen_sub_CC(tmp
, tmp
, tmp2
);
8250 tcg_temp_free_i32(tmp
);
8254 gen_add_CC(tmp
, tmp
, tmp2
);
8256 tcg_temp_free_i32(tmp
);
8259 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8263 store_reg_bx(s
, rd
, tmp
);
8266 if (logic_cc
&& rd
== 15) {
8267 /* MOVS r15, ... is used for exception return. */
8271 gen_exception_return(s
, tmp2
);
8276 store_reg_bx(s
, rd
, tmp2
);
8280 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8284 store_reg_bx(s
, rd
, tmp
);
8288 tcg_gen_not_i32(tmp2
, tmp2
);
8292 store_reg_bx(s
, rd
, tmp2
);
8295 if (op1
!= 0x0f && op1
!= 0x0d) {
8296 tcg_temp_free_i32(tmp2
);
8299 /* other instructions */
8300 op1
= (insn
>> 24) & 0xf;
8304 /* multiplies, extra load/stores */
8305 sh
= (insn
>> 5) & 3;
8308 rd
= (insn
>> 16) & 0xf;
8309 rn
= (insn
>> 12) & 0xf;
8310 rs
= (insn
>> 8) & 0xf;
8312 op1
= (insn
>> 20) & 0xf;
8314 case 0: case 1: case 2: case 3: case 6:
8316 tmp
= load_reg(s
, rs
);
8317 tmp2
= load_reg(s
, rm
);
8318 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8319 tcg_temp_free_i32(tmp2
);
8320 if (insn
& (1 << 22)) {
8321 /* Subtract (mls) */
8323 tmp2
= load_reg(s
, rn
);
8324 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8325 tcg_temp_free_i32(tmp2
);
8326 } else if (insn
& (1 << 21)) {
8328 tmp2
= load_reg(s
, rn
);
8329 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8330 tcg_temp_free_i32(tmp2
);
8332 if (insn
& (1 << 20))
8334 store_reg(s
, rd
, tmp
);
8337 /* 64 bit mul double accumulate (UMAAL) */
8339 tmp
= load_reg(s
, rs
);
8340 tmp2
= load_reg(s
, rm
);
8341 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8342 gen_addq_lo(s
, tmp64
, rn
);
8343 gen_addq_lo(s
, tmp64
, rd
);
8344 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8345 tcg_temp_free_i64(tmp64
);
8347 case 8: case 9: case 10: case 11:
8348 case 12: case 13: case 14: case 15:
8349 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8350 tmp
= load_reg(s
, rs
);
8351 tmp2
= load_reg(s
, rm
);
8352 if (insn
& (1 << 22)) {
8353 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
8355 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
8357 if (insn
& (1 << 21)) { /* mult accumulate */
8358 TCGv_i32 al
= load_reg(s
, rn
);
8359 TCGv_i32 ah
= load_reg(s
, rd
);
8360 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
8361 tcg_temp_free_i32(al
);
8362 tcg_temp_free_i32(ah
);
8364 if (insn
& (1 << 20)) {
8365 gen_logicq_cc(tmp
, tmp2
);
8367 store_reg(s
, rn
, tmp
);
8368 store_reg(s
, rd
, tmp2
);
8374 rn
= (insn
>> 16) & 0xf;
8375 rd
= (insn
>> 12) & 0xf;
8376 if (insn
& (1 << 23)) {
8377 /* load/store exclusive */
8378 int op2
= (insn
>> 8) & 3;
8379 op1
= (insn
>> 21) & 0x3;
8382 case 0: /* lda/stl */
8388 case 1: /* reserved */
8390 case 2: /* ldaex/stlex */
8393 case 3: /* ldrex/strex */
8402 addr
= tcg_temp_local_new_i32();
8403 load_reg_var(s
, addr
, rn
);
8405 /* Since the emulation does not have barriers,
8406 the acquire/release semantics need no special
8409 if (insn
& (1 << 20)) {
8410 tmp
= tcg_temp_new_i32();
8413 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8416 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
8419 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
8424 store_reg(s
, rd
, tmp
);
8427 tmp
= load_reg(s
, rm
);
8430 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8433 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
8436 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
8441 tcg_temp_free_i32(tmp
);
8443 } else if (insn
& (1 << 20)) {
8446 gen_load_exclusive(s
, rd
, 15, addr
, 2);
8448 case 1: /* ldrexd */
8449 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
8451 case 2: /* ldrexb */
8452 gen_load_exclusive(s
, rd
, 15, addr
, 0);
8454 case 3: /* ldrexh */
8455 gen_load_exclusive(s
, rd
, 15, addr
, 1);
8464 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
8466 case 1: /* strexd */
8467 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
8469 case 2: /* strexb */
8470 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
8472 case 3: /* strexh */
8473 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
8479 tcg_temp_free_i32(addr
);
8481 /* SWP instruction */
8484 /* ??? This is not really atomic. However we know
8485 we never have multiple CPUs running in parallel,
8486 so it is good enough. */
8487 addr
= load_reg(s
, rn
);
8488 tmp
= load_reg(s
, rm
);
8489 tmp2
= tcg_temp_new_i32();
8490 if (insn
& (1 << 22)) {
8491 gen_aa32_ld8u(tmp2
, addr
, get_mem_index(s
));
8492 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
8494 gen_aa32_ld32u(tmp2
, addr
, get_mem_index(s
));
8495 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8497 tcg_temp_free_i32(tmp
);
8498 tcg_temp_free_i32(addr
);
8499 store_reg(s
, rd
, tmp2
);
8504 bool load
= insn
& (1 << 20);
8505 bool doubleword
= false;
8506 /* Misc load/store */
8507 rn
= (insn
>> 16) & 0xf;
8508 rd
= (insn
>> 12) & 0xf;
8510 if (!load
&& (sh
& 2)) {
8514 /* UNPREDICTABLE; we choose to UNDEF */
8517 load
= (sh
& 1) == 0;
8521 addr
= load_reg(s
, rn
);
8522 if (insn
& (1 << 24))
8523 gen_add_datah_offset(s
, insn
, 0, addr
);
8529 tmp
= load_reg(s
, rd
);
8530 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8531 tcg_temp_free_i32(tmp
);
8532 tcg_gen_addi_i32(addr
, addr
, 4);
8533 tmp
= load_reg(s
, rd
+ 1);
8534 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
8535 tcg_temp_free_i32(tmp
);
8538 tmp
= tcg_temp_new_i32();
8539 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8540 store_reg(s
, rd
, tmp
);
8541 tcg_gen_addi_i32(addr
, addr
, 4);
8542 tmp
= tcg_temp_new_i32();
8543 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
8546 address_offset
= -4;
8549 tmp
= tcg_temp_new_i32();
8552 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
8555 gen_aa32_ld8s(tmp
, addr
, get_mem_index(s
));
8559 gen_aa32_ld16s(tmp
, addr
, get_mem_index(s
));
8564 tmp
= load_reg(s
, rd
);
8565 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
8566 tcg_temp_free_i32(tmp
);
8568 /* Perform base writeback before the loaded value to
8569 ensure correct behavior with overlapping index registers.
8570 ldrd with base writeback is undefined if the
8571 destination and index registers overlap. */
8572 if (!(insn
& (1 << 24))) {
8573 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
8574 store_reg(s
, rn
, addr
);
8575 } else if (insn
& (1 << 21)) {
8577 tcg_gen_addi_i32(addr
, addr
, address_offset
);
8578 store_reg(s
, rn
, addr
);
8580 tcg_temp_free_i32(addr
);
8583 /* Complete the load. */
8584 store_reg(s
, rd
, tmp
);
8593 if (insn
& (1 << 4)) {
8595 /* Armv6 Media instructions. */
8597 rn
= (insn
>> 16) & 0xf;
8598 rd
= (insn
>> 12) & 0xf;
8599 rs
= (insn
>> 8) & 0xf;
8600 switch ((insn
>> 23) & 3) {
8601 case 0: /* Parallel add/subtract. */
8602 op1
= (insn
>> 20) & 7;
8603 tmp
= load_reg(s
, rn
);
8604 tmp2
= load_reg(s
, rm
);
8605 sh
= (insn
>> 5) & 7;
8606 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
8608 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
8609 tcg_temp_free_i32(tmp2
);
8610 store_reg(s
, rd
, tmp
);
8613 if ((insn
& 0x00700020) == 0) {
8614 /* Halfword pack. */
8615 tmp
= load_reg(s
, rn
);
8616 tmp2
= load_reg(s
, rm
);
8617 shift
= (insn
>> 7) & 0x1f;
8618 if (insn
& (1 << 6)) {
8622 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8623 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8624 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8628 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8629 tcg_gen_ext16u_i32(tmp
, tmp
);
8630 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8632 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8633 tcg_temp_free_i32(tmp2
);
8634 store_reg(s
, rd
, tmp
);
8635 } else if ((insn
& 0x00200020) == 0x00200000) {
8637 tmp
= load_reg(s
, rm
);
8638 shift
= (insn
>> 7) & 0x1f;
8639 if (insn
& (1 << 6)) {
8642 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8644 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8646 sh
= (insn
>> 16) & 0x1f;
8647 tmp2
= tcg_const_i32(sh
);
8648 if (insn
& (1 << 22))
8649 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
8651 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
8652 tcg_temp_free_i32(tmp2
);
8653 store_reg(s
, rd
, tmp
);
8654 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
8656 tmp
= load_reg(s
, rm
);
8657 sh
= (insn
>> 16) & 0x1f;
8658 tmp2
= tcg_const_i32(sh
);
8659 if (insn
& (1 << 22))
8660 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
8662 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
8663 tcg_temp_free_i32(tmp2
);
8664 store_reg(s
, rd
, tmp
);
8665 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
8667 tmp
= load_reg(s
, rn
);
8668 tmp2
= load_reg(s
, rm
);
8669 tmp3
= tcg_temp_new_i32();
8670 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8671 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8672 tcg_temp_free_i32(tmp3
);
8673 tcg_temp_free_i32(tmp2
);
8674 store_reg(s
, rd
, tmp
);
8675 } else if ((insn
& 0x000003e0) == 0x00000060) {
8676 tmp
= load_reg(s
, rm
);
8677 shift
= (insn
>> 10) & 3;
8678 /* ??? In many cases it's not necessary to do a
8679 rotate, a shift is sufficient. */
8681 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8682 op1
= (insn
>> 20) & 7;
8684 case 0: gen_sxtb16(tmp
); break;
8685 case 2: gen_sxtb(tmp
); break;
8686 case 3: gen_sxth(tmp
); break;
8687 case 4: gen_uxtb16(tmp
); break;
8688 case 6: gen_uxtb(tmp
); break;
8689 case 7: gen_uxth(tmp
); break;
8690 default: goto illegal_op
;
8693 tmp2
= load_reg(s
, rn
);
8694 if ((op1
& 3) == 0) {
8695 gen_add16(tmp
, tmp2
);
8697 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8698 tcg_temp_free_i32(tmp2
);
8701 store_reg(s
, rd
, tmp
);
8702 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
8704 tmp
= load_reg(s
, rm
);
8705 if (insn
& (1 << 22)) {
8706 if (insn
& (1 << 7)) {
8710 gen_helper_rbit(tmp
, tmp
);
8713 if (insn
& (1 << 7))
8716 tcg_gen_bswap32_i32(tmp
, tmp
);
8718 store_reg(s
, rd
, tmp
);
8723 case 2: /* Multiplies (Type 3). */
8724 switch ((insn
>> 20) & 0x7) {
8726 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
8727 /* op2 not 00x or 11x : UNDEF */
8730 /* Signed multiply most significant [accumulate].
8731 (SMMUL, SMMLA, SMMLS) */
8732 tmp
= load_reg(s
, rm
);
8733 tmp2
= load_reg(s
, rs
);
8734 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8737 tmp
= load_reg(s
, rd
);
8738 if (insn
& (1 << 6)) {
8739 tmp64
= gen_subq_msw(tmp64
, tmp
);
8741 tmp64
= gen_addq_msw(tmp64
, tmp
);
8744 if (insn
& (1 << 5)) {
8745 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8747 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8748 tmp
= tcg_temp_new_i32();
8749 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8750 tcg_temp_free_i64(tmp64
);
8751 store_reg(s
, rn
, tmp
);
8755 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8756 if (insn
& (1 << 7)) {
8759 tmp
= load_reg(s
, rm
);
8760 tmp2
= load_reg(s
, rs
);
8761 if (insn
& (1 << 5))
8762 gen_swap_half(tmp2
);
8763 gen_smul_dual(tmp
, tmp2
);
8764 if (insn
& (1 << 22)) {
8765 /* smlald, smlsld */
8768 tmp64
= tcg_temp_new_i64();
8769 tmp64_2
= tcg_temp_new_i64();
8770 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8771 tcg_gen_ext_i32_i64(tmp64_2
, tmp2
);
8772 tcg_temp_free_i32(tmp
);
8773 tcg_temp_free_i32(tmp2
);
8774 if (insn
& (1 << 6)) {
8775 tcg_gen_sub_i64(tmp64
, tmp64
, tmp64_2
);
8777 tcg_gen_add_i64(tmp64
, tmp64
, tmp64_2
);
8779 tcg_temp_free_i64(tmp64_2
);
8780 gen_addq(s
, tmp64
, rd
, rn
);
8781 gen_storeq_reg(s
, rd
, rn
, tmp64
);
8782 tcg_temp_free_i64(tmp64
);
8784 /* smuad, smusd, smlad, smlsd */
8785 if (insn
& (1 << 6)) {
8786 /* This subtraction cannot overflow. */
8787 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8789 /* This addition cannot overflow 32 bits;
8790 * however it may overflow considered as a
8791 * signed operation, in which case we must set
8794 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8796 tcg_temp_free_i32(tmp2
);
8799 tmp2
= load_reg(s
, rd
);
8800 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8801 tcg_temp_free_i32(tmp2
);
8803 store_reg(s
, rn
, tmp
);
8809 if (!arm_dc_feature(s
, ARM_FEATURE_ARM_DIV
)) {
8812 if (((insn
>> 5) & 7) || (rd
!= 15)) {
8815 tmp
= load_reg(s
, rm
);
8816 tmp2
= load_reg(s
, rs
);
8817 if (insn
& (1 << 21)) {
8818 gen_helper_udiv(tmp
, tmp
, tmp2
);
8820 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8822 tcg_temp_free_i32(tmp2
);
8823 store_reg(s
, rn
, tmp
);
8830 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
8832 case 0: /* Unsigned sum of absolute differences. */
8834 tmp
= load_reg(s
, rm
);
8835 tmp2
= load_reg(s
, rs
);
8836 gen_helper_usad8(tmp
, tmp
, tmp2
);
8837 tcg_temp_free_i32(tmp2
);
8839 tmp2
= load_reg(s
, rd
);
8840 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8841 tcg_temp_free_i32(tmp2
);
8843 store_reg(s
, rn
, tmp
);
8845 case 0x20: case 0x24: case 0x28: case 0x2c:
8846 /* Bitfield insert/clear. */
8848 shift
= (insn
>> 7) & 0x1f;
8849 i
= (insn
>> 16) & 0x1f;
8851 /* UNPREDICTABLE; we choose to UNDEF */
8856 tmp
= tcg_temp_new_i32();
8857 tcg_gen_movi_i32(tmp
, 0);
8859 tmp
= load_reg(s
, rm
);
8862 tmp2
= load_reg(s
, rd
);
8863 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
8864 tcg_temp_free_i32(tmp2
);
8866 store_reg(s
, rd
, tmp
);
8868 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8869 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8871 tmp
= load_reg(s
, rm
);
8872 shift
= (insn
>> 7) & 0x1f;
8873 i
= ((insn
>> 16) & 0x1f) + 1;
8878 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
8880 gen_sbfx(tmp
, shift
, i
);
8883 store_reg(s
, rd
, tmp
);
8893 /* Check for undefined extension instructions
8894 * per the ARM Bible IE:
8895 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8897 sh
= (0xf << 20) | (0xf << 4);
8898 if (op1
== 0x7 && ((insn
& sh
) == sh
))
8902 /* load/store byte/word */
8903 rn
= (insn
>> 16) & 0xf;
8904 rd
= (insn
>> 12) & 0xf;
8905 tmp2
= load_reg(s
, rn
);
8906 if ((insn
& 0x01200000) == 0x00200000) {
8908 i
= get_a32_user_mem_index(s
);
8910 i
= get_mem_index(s
);
8912 if (insn
& (1 << 24))
8913 gen_add_data_offset(s
, insn
, tmp2
);
8914 if (insn
& (1 << 20)) {
8916 tmp
= tcg_temp_new_i32();
8917 if (insn
& (1 << 22)) {
8918 gen_aa32_ld8u(tmp
, tmp2
, i
);
8920 gen_aa32_ld32u(tmp
, tmp2
, i
);
8924 tmp
= load_reg(s
, rd
);
8925 if (insn
& (1 << 22)) {
8926 gen_aa32_st8(tmp
, tmp2
, i
);
8928 gen_aa32_st32(tmp
, tmp2
, i
);
8930 tcg_temp_free_i32(tmp
);
8932 if (!(insn
& (1 << 24))) {
8933 gen_add_data_offset(s
, insn
, tmp2
);
8934 store_reg(s
, rn
, tmp2
);
8935 } else if (insn
& (1 << 21)) {
8936 store_reg(s
, rn
, tmp2
);
8938 tcg_temp_free_i32(tmp2
);
8940 if (insn
& (1 << 20)) {
8941 /* Complete the load. */
8942 store_reg_from_load(s
, rd
, tmp
);
8948 int j
, n
, loaded_base
;
8949 bool exc_return
= false;
8950 bool is_load
= extract32(insn
, 20, 1);
8952 TCGv_i32 loaded_var
;
8953 /* load/store multiple words */
8954 /* XXX: store correct base if write back */
8955 if (insn
& (1 << 22)) {
8956 /* LDM (user), LDM (exception return) and STM (user) */
8958 goto illegal_op
; /* only usable in supervisor mode */
8960 if (is_load
&& extract32(insn
, 15, 1)) {
8966 rn
= (insn
>> 16) & 0xf;
8967 addr
= load_reg(s
, rn
);
8969 /* compute total size */
8971 TCGV_UNUSED_I32(loaded_var
);
8974 if (insn
& (1 << i
))
8977 /* XXX: test invalid n == 0 case ? */
8978 if (insn
& (1 << 23)) {
8979 if (insn
& (1 << 24)) {
8981 tcg_gen_addi_i32(addr
, addr
, 4);
8983 /* post increment */
8986 if (insn
& (1 << 24)) {
8988 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
8990 /* post decrement */
8992 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
8997 if (insn
& (1 << i
)) {
9000 tmp
= tcg_temp_new_i32();
9001 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9003 tmp2
= tcg_const_i32(i
);
9004 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
9005 tcg_temp_free_i32(tmp2
);
9006 tcg_temp_free_i32(tmp
);
9007 } else if (i
== rn
) {
9011 store_reg_from_load(s
, i
, tmp
);
9016 /* special case: r15 = PC + 8 */
9017 val
= (long)s
->pc
+ 4;
9018 tmp
= tcg_temp_new_i32();
9019 tcg_gen_movi_i32(tmp
, val
);
9021 tmp
= tcg_temp_new_i32();
9022 tmp2
= tcg_const_i32(i
);
9023 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
9024 tcg_temp_free_i32(tmp2
);
9026 tmp
= load_reg(s
, i
);
9028 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9029 tcg_temp_free_i32(tmp
);
9032 /* no need to add after the last transfer */
9034 tcg_gen_addi_i32(addr
, addr
, 4);
9037 if (insn
& (1 << 21)) {
9039 if (insn
& (1 << 23)) {
9040 if (insn
& (1 << 24)) {
9043 /* post increment */
9044 tcg_gen_addi_i32(addr
, addr
, 4);
9047 if (insn
& (1 << 24)) {
9050 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9052 /* post decrement */
9053 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9056 store_reg(s
, rn
, addr
);
9058 tcg_temp_free_i32(addr
);
9061 store_reg(s
, rn
, loaded_var
);
9064 /* Restore CPSR from SPSR. */
9065 tmp
= load_cpu_field(spsr
);
9066 gen_set_cpsr(tmp
, CPSR_ERET_MASK
);
9067 tcg_temp_free_i32(tmp
);
9068 s
->is_jmp
= DISAS_JUMP
;
9077 /* branch (and link) */
9078 val
= (int32_t)s
->pc
;
9079 if (insn
& (1 << 24)) {
9080 tmp
= tcg_temp_new_i32();
9081 tcg_gen_movi_i32(tmp
, val
);
9082 store_reg(s
, 14, tmp
);
9084 offset
= sextract32(insn
<< 2, 0, 26);
9092 if (((insn
>> 8) & 0xe) == 10) {
9094 if (disas_vfp_insn(s
, insn
)) {
9097 } else if (disas_coproc_insn(s
, insn
)) {
9104 gen_set_pc_im(s
, s
->pc
);
9105 s
->svc_imm
= extract32(insn
, 0, 24);
9106 s
->is_jmp
= DISAS_SWI
;
9110 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
9111 default_exception_el(s
));
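/* The Thumb-2 data-processing helpers below take the 4-bit op field of the
 * 32-bit encodings; ops 5, 6, 7, 9, 12 and 15 are rejected with a nonzero
 * return, while the rest map to AND/BIC/ORR/ORN/EOR/ADD/ADC/SBC/SUB/RSB.
 */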
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds) {
            gen_add_CC(t0, t0, t1);
        } else {
            tcg_gen_add_i32(t0, t0, t1);
        }
        break;
    case 10: /* adc */
        if (conds) {
            gen_adc_CC(t0, t0, t1);
        } else {
            gen_adc(t0, t1);
        }
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds) {
            gen_sub_CC(t0, t0, t1);
        } else {
            tcg_gen_sub_i32(t0, t0, t1);
        }
        break;
    case 14: /* rsb */
        if (conds) {
            gen_sub_CC(t0, t1, t0);
        } else {
            tcg_gen_sub_i32(t0, t1, t0);
        }
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out) {
            gen_set_CF_bit31(t1);
        }
    }
    return 0;
}
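/* gen_thumb2_data_op() is shared by the register-shifted and immediate forms
 * of the T32 data-processing instructions; conds carries the S bit, and
 * shifter_out is nonzero only in the modified-immediate path, where the
 * rotated constant supplies the shifter carry-out.
 */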
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
          || arm_dc_feature(s, ARM_FEATURE_M))) {
9218 /* Thumb-1 cores may need to treat bl and blx as a pair of
9219 16-bit instructions to get correct prefetch abort behavior. */
9221 if ((insn
& (1 << 12)) == 0) {
9223 /* Second half of blx. */
9224 offset
= ((insn
& 0x7ff) << 1);
9225 tmp
= load_reg(s
, 14);
9226 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9227 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
9229 tmp2
= tcg_temp_new_i32();
9230 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9231 store_reg(s
, 14, tmp2
);
9235 if (insn
& (1 << 11)) {
9236 /* Second half of bl. */
9237 offset
= ((insn
& 0x7ff) << 1) | 1;
9238 tmp
= load_reg(s
, 14);
9239 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9241 tmp2
= tcg_temp_new_i32();
9242 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9243 store_reg(s
, 14, tmp2
);
9247 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
9248 /* Instruction spans a page boundary. Implement it as two
9249 16-bit instructions in case the second half causes an
9251 offset
= ((int32_t)insn
<< 21) >> 9;
9252 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
9255 /* Fall through to 32-bit decode. */
9258 insn
= arm_lduw_code(env
, s
->pc
, s
->bswap_code
);
9260 insn
|= (uint32_t)insn_hw1
<< 16;
9262 if ((insn
& 0xf800e800) != 0xf000e800) {
9266 rn
= (insn
>> 16) & 0xf;
9267 rs
= (insn
>> 12) & 0xf;
9268 rd
= (insn
>> 8) & 0xf;
9270 switch ((insn
>> 25) & 0xf) {
9271 case 0: case 1: case 2: case 3:
9272 /* 16-bit instructions. Should never happen. */
9275 if (insn
& (1 << 22)) {
9276 /* Other load/store, table branch. */
9277 if (insn
& 0x01200000) {
9278 /* Load/store doubleword. */
9280 addr
= tcg_temp_new_i32();
9281 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
9283 addr
= load_reg(s
, rn
);
9285 offset
= (insn
& 0xff) * 4;
9286 if ((insn
& (1 << 23)) == 0)
9288 if (insn
& (1 << 24)) {
9289 tcg_gen_addi_i32(addr
, addr
, offset
);
9292 if (insn
& (1 << 20)) {
9294 tmp
= tcg_temp_new_i32();
9295 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9296 store_reg(s
, rs
, tmp
);
9297 tcg_gen_addi_i32(addr
, addr
, 4);
9298 tmp
= tcg_temp_new_i32();
9299 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9300 store_reg(s
, rd
, tmp
);
9303 tmp
= load_reg(s
, rs
);
9304 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9305 tcg_temp_free_i32(tmp
);
9306 tcg_gen_addi_i32(addr
, addr
, 4);
9307 tmp
= load_reg(s
, rd
);
9308 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9309 tcg_temp_free_i32(tmp
);
9311 if (insn
& (1 << 21)) {
9312 /* Base writeback. */
9315 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
9316 store_reg(s
, rn
, addr
);
9318 tcg_temp_free_i32(addr
);
9320 } else if ((insn
& (1 << 23)) == 0) {
9321 /* Load/store exclusive word. */
9322 addr
= tcg_temp_local_new_i32();
9323 load_reg_var(s
, addr
, rn
);
9324 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
9325 if (insn
& (1 << 20)) {
9326 gen_load_exclusive(s
, rs
, 15, addr
, 2);
9328 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
9330 tcg_temp_free_i32(addr
);
9331 } else if ((insn
& (7 << 5)) == 0) {
9334 addr
= tcg_temp_new_i32();
9335 tcg_gen_movi_i32(addr
, s
->pc
);
9337 addr
= load_reg(s
, rn
);
9339 tmp
= load_reg(s
, rm
);
9340 tcg_gen_add_i32(addr
, addr
, tmp
);
9341 if (insn
& (1 << 4)) {
9343 tcg_gen_add_i32(addr
, addr
, tmp
);
9344 tcg_temp_free_i32(tmp
);
9345 tmp
= tcg_temp_new_i32();
9346 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
9348 tcg_temp_free_i32(tmp
);
9349 tmp
= tcg_temp_new_i32();
9350 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
9352 tcg_temp_free_i32(addr
);
9353 tcg_gen_shli_i32(tmp
, tmp
, 1);
9354 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
9355 store_reg(s
, 15, tmp
);
9357 int op2
= (insn
>> 6) & 0x3;
9358 op
= (insn
>> 4) & 0x3;
9363 /* Load/store exclusive byte/halfword/doubleword */
9370 /* Load-acquire/store-release */
9376 /* Load-acquire/store-release exclusive */
9380 addr
= tcg_temp_local_new_i32();
9381 load_reg_var(s
, addr
, rn
);
9383 if (insn
& (1 << 20)) {
9384 tmp
= tcg_temp_new_i32();
9387 gen_aa32_ld8u(tmp
, addr
, get_mem_index(s
));
9390 gen_aa32_ld16u(tmp
, addr
, get_mem_index(s
));
9393 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9398 store_reg(s
, rs
, tmp
);
9400 tmp
= load_reg(s
, rs
);
9403 gen_aa32_st8(tmp
, addr
, get_mem_index(s
));
9406 gen_aa32_st16(tmp
, addr
, get_mem_index(s
));
9409 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9414 tcg_temp_free_i32(tmp
);
9416 } else if (insn
& (1 << 20)) {
9417 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
9419 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
9421 tcg_temp_free_i32(addr
);
9424 /* Load/store multiple, RFE, SRS. */
9425 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
9426 /* RFE, SRS: not available in user mode or on M profile */
9427 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
9430 if (insn
& (1 << 20)) {
9432 addr
= load_reg(s
, rn
);
9433 if ((insn
& (1 << 24)) == 0)
9434 tcg_gen_addi_i32(addr
, addr
, -8);
9435 /* Load PC into tmp and CPSR into tmp2. */
9436 tmp
= tcg_temp_new_i32();
9437 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9438 tcg_gen_addi_i32(addr
, addr
, 4);
9439 tmp2
= tcg_temp_new_i32();
9440 gen_aa32_ld32u(tmp2
, addr
, get_mem_index(s
));
9441 if (insn
& (1 << 21)) {
9442 /* Base writeback. */
9443 if (insn
& (1 << 24)) {
9444 tcg_gen_addi_i32(addr
, addr
, 4);
9446 tcg_gen_addi_i32(addr
, addr
, -4);
9448 store_reg(s
, rn
, addr
);
9450 tcg_temp_free_i32(addr
);
9452 gen_rfe(s
, tmp
, tmp2
);
9455 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
9459 int i
, loaded_base
= 0;
9460 TCGv_i32 loaded_var
;
9461 /* Load/store multiple. */
9462 addr
= load_reg(s
, rn
);
9464 for (i
= 0; i
< 16; i
++) {
9465 if (insn
& (1 << i
))
9468 if (insn
& (1 << 24)) {
9469 tcg_gen_addi_i32(addr
, addr
, -offset
);
9472 TCGV_UNUSED_I32(loaded_var
);
9473 for (i
= 0; i
< 16; i
++) {
9474 if ((insn
& (1 << i
)) == 0)
9476 if (insn
& (1 << 20)) {
9478 tmp
= tcg_temp_new_i32();
9479 gen_aa32_ld32u(tmp
, addr
, get_mem_index(s
));
9482 } else if (i
== rn
) {
9486 store_reg(s
, i
, tmp
);
9490 tmp
= load_reg(s
, i
);
9491 gen_aa32_st32(tmp
, addr
, get_mem_index(s
));
9492 tcg_temp_free_i32(tmp
);
9494 tcg_gen_addi_i32(addr
, addr
, 4);
9497 store_reg(s
, rn
, loaded_var
);
9499 if (insn
& (1 << 21)) {
9500 /* Base register writeback. */
9501 if (insn
& (1 << 24)) {
9502 tcg_gen_addi_i32(addr
, addr
, -offset
);
9504 /* Fault if writeback register is in register list. */
9505 if (insn
& (1 << rn
))
9507 store_reg(s
, rn
, addr
);
9509 tcg_temp_free_i32(addr
);
9516 op
= (insn
>> 21) & 0xf;
9518 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9521 /* Halfword pack. */
9522 tmp
= load_reg(s
, rn
);
9523 tmp2
= load_reg(s
, rm
);
9524 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
9525 if (insn
& (1 << 5)) {
9529 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
9530 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
9531 tcg_gen_ext16u_i32(tmp2
, tmp2
);
9535 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
9536 tcg_gen_ext16u_i32(tmp
, tmp
);
9537 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
9539 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9540 tcg_temp_free_i32(tmp2
);
9541 store_reg(s
, rd
, tmp
);
9543 /* Data processing register constant shift. */
9545 tmp
= tcg_temp_new_i32();
9546 tcg_gen_movi_i32(tmp
, 0);
9548 tmp
= load_reg(s
, rn
);
9550 tmp2
= load_reg(s
, rm
);
9552 shiftop
= (insn
>> 4) & 3;
9553 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
9554 conds
= (insn
& (1 << 20)) != 0;
9555 logic_cc
= (conds
&& thumb2_logic_op(op
));
9556 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
9557 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
9559 tcg_temp_free_i32(tmp2
);
9561 store_reg(s
, rd
, tmp
);
9563 tcg_temp_free_i32(tmp
);
9567 case 13: /* Misc data processing. */
9568 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
9569 if (op
< 4 && (insn
& 0xf000) != 0xf000)
9572 case 0: /* Register controlled shift. */
9573 tmp
= load_reg(s
, rn
);
9574 tmp2
= load_reg(s
, rm
);
9575 if ((insn
& 0x70) != 0)
9577 op
= (insn
>> 21) & 3;
9578 logic_cc
= (insn
& (1 << 20)) != 0;
9579 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
9582 store_reg_bx(s
, rd
, tmp
);
9584 case 1: /* Sign/zero extend. */
9585 op
= (insn
>> 20) & 7;
9587 case 0: /* SXTAH, SXTH */
9588 case 1: /* UXTAH, UXTH */
9589 case 4: /* SXTAB, SXTB */
9590 case 5: /* UXTAB, UXTB */
9592 case 2: /* SXTAB16, SXTB16 */
9593 case 3: /* UXTAB16, UXTB16 */
9594 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9602 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9606 tmp
= load_reg(s
, rm
);
9607 shift
= (insn
>> 4) & 3;
9608 /* ??? In many cases it's not necessary to do a
9609 rotate, a shift is sufficient. */
9611 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
9612 op
= (insn
>> 20) & 7;
9614 case 0: gen_sxth(tmp
); break;
9615 case 1: gen_uxth(tmp
); break;
9616 case 2: gen_sxtb16(tmp
); break;
9617 case 3: gen_uxtb16(tmp
); break;
9618 case 4: gen_sxtb(tmp
); break;
9619 case 5: gen_uxtb(tmp
); break;
9621 g_assert_not_reached();
9624 tmp2
= load_reg(s
, rn
);
9625 if ((op
>> 1) == 1) {
9626 gen_add16(tmp
, tmp2
);
9628 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9629 tcg_temp_free_i32(tmp2
);
9632 store_reg(s
, rd
, tmp
);
9634 case 2: /* SIMD add/subtract. */
9635 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9638 op
= (insn
>> 20) & 7;
9639 shift
= (insn
>> 4) & 7;
9640 if ((op
& 3) == 3 || (shift
& 3) == 3)
9642 tmp
= load_reg(s
, rn
);
9643 tmp2
= load_reg(s
, rm
);
9644 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
9645 tcg_temp_free_i32(tmp2
);
9646 store_reg(s
, rd
, tmp
);
9648 case 3: /* Other data processing. */
9649 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
9651 /* Saturating add/subtract. */
9652 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9655 tmp
= load_reg(s
, rn
);
9656 tmp2
= load_reg(s
, rm
);
9658 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
9660 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
9662 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
9663 tcg_temp_free_i32(tmp2
);
9666 case 0x0a: /* rbit */
9667 case 0x08: /* rev */
9668 case 0x09: /* rev16 */
9669 case 0x0b: /* revsh */
9670 case 0x18: /* clz */
9672 case 0x10: /* sel */
9673 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9677 case 0x20: /* crc32/crc32c */
9683 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
)) {
9690 tmp
= load_reg(s
, rn
);
9692 case 0x0a: /* rbit */
9693 gen_helper_rbit(tmp
, tmp
);
9695 case 0x08: /* rev */
9696 tcg_gen_bswap32_i32(tmp
, tmp
);
9698 case 0x09: /* rev16 */
9701 case 0x0b: /* revsh */
9704 case 0x10: /* sel */
9705 tmp2
= load_reg(s
, rm
);
9706 tmp3
= tcg_temp_new_i32();
9707 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
9708 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
9709 tcg_temp_free_i32(tmp3
);
9710 tcg_temp_free_i32(tmp2
);
9712 case 0x18: /* clz */
9713 gen_helper_clz(tmp
, tmp
);
9723 uint32_t sz
= op
& 0x3;
9724 uint32_t c
= op
& 0x8;
9726 tmp2
= load_reg(s
, rm
);
9728 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
9729 } else if (sz
== 1) {
9730 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
9732 tmp3
= tcg_const_i32(1 << sz
);
9734 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
9736 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
9738 tcg_temp_free_i32(tmp2
);
9739 tcg_temp_free_i32(tmp3
);
9743 g_assert_not_reached();
9746 store_reg(s
, rd
, tmp
);
9748 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9749 switch ((insn
>> 20) & 7) {
9750 case 0: /* 32 x 32 -> 32 */
9751 case 7: /* Unsigned sum of absolute differences. */
9753 case 1: /* 16 x 16 -> 32 */
9754 case 2: /* Dual multiply add. */
9755 case 3: /* 32 * 16 -> 32msb */
9756 case 4: /* Dual multiply subtract. */
9757 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9758 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9763 op
= (insn
>> 4) & 0xf;
9764 tmp
= load_reg(s
, rn
);
9765 tmp2
= load_reg(s
, rm
);
9766 switch ((insn
>> 20) & 7) {
9767 case 0: /* 32 x 32 -> 32 */
9768 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9769 tcg_temp_free_i32(tmp2
);
9771 tmp2
= load_reg(s
, rs
);
9773 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
9775 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9776 tcg_temp_free_i32(tmp2
);
9779 case 1: /* 16 x 16 -> 32 */
9780 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
9781 tcg_temp_free_i32(tmp2
);
9783 tmp2
= load_reg(s
, rs
);
9784 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9785 tcg_temp_free_i32(tmp2
);
9788 case 2: /* Dual multiply add. */
9789 case 4: /* Dual multiply subtract. */
9791 gen_swap_half(tmp2
);
9792 gen_smul_dual(tmp
, tmp2
);
9793 if (insn
& (1 << 22)) {
9794 /* This subtraction cannot overflow. */
9795 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9797 /* This addition cannot overflow 32 bits;
9798 * however it may overflow considered as a signed
9799 * operation, in which case we must set the Q flag.
9801 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9803 tcg_temp_free_i32(tmp2
);
9806 tmp2
= load_reg(s
, rs
);
9807 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9808 tcg_temp_free_i32(tmp2
);
9811 case 3: /* 32 * 16 -> 32msb */
9813 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
9816 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9817 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
9818 tmp
= tcg_temp_new_i32();
9819 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9820 tcg_temp_free_i64(tmp64
);
9823 tmp2
= load_reg(s
, rs
);
9824 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9825 tcg_temp_free_i32(tmp2
);
9828 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9829 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9831 tmp
= load_reg(s
, rs
);
9832 if (insn
& (1 << 20)) {
9833 tmp64
= gen_addq_msw(tmp64
, tmp
);
9835 tmp64
= gen_subq_msw(tmp64
, tmp
);
9838 if (insn
& (1 << 4)) {
9839 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
9841 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
9842 tmp
= tcg_temp_new_i32();
9843 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9844 tcg_temp_free_i64(tmp64
);
9846 case 7: /* Unsigned sum of absolute differences. */
9847 gen_helper_usad8(tmp
, tmp
, tmp2
);
9848 tcg_temp_free_i32(tmp2
);
9850 tmp2
= load_reg(s
, rs
);
9851 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9852 tcg_temp_free_i32(tmp2
);
9856 store_reg(s
, rd
, tmp
);
9858 case 6: case 7: /* 64-bit multiply, Divide. */
9859 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
9860 tmp
= load_reg(s
, rn
);
9861 tmp2
= load_reg(s
, rm
);
9862 if ((op
& 0x50) == 0x10) {
9864 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DIV
)) {
9868 gen_helper_udiv(tmp
, tmp
, tmp2
);
9870 gen_helper_sdiv(tmp
, tmp
, tmp2
);
9871 tcg_temp_free_i32(tmp2
);
9872 store_reg(s
, rd
, tmp
);
9873 } else if ((op
& 0xe) == 0xc) {
9874 /* Dual multiply accumulate long. */
9875 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9876 tcg_temp_free_i32(tmp
);
9877 tcg_temp_free_i32(tmp2
);
9881 gen_swap_half(tmp2
);
9882 gen_smul_dual(tmp
, tmp2
);
9884 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9886 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9888 tcg_temp_free_i32(tmp2
);
9890 tmp64
= tcg_temp_new_i64();
9891 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9892 tcg_temp_free_i32(tmp
);
9893 gen_addq(s
, tmp64
, rs
, rd
);
9894 gen_storeq_reg(s
, rs
, rd
, tmp64
);
9895 tcg_temp_free_i64(tmp64
);
9898 /* Unsigned 64-bit multiply */
9899 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
9903 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9904 tcg_temp_free_i32(tmp2
);
9905 tcg_temp_free_i32(tmp
);
9908 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
9909 tcg_temp_free_i32(tmp2
);
9910 tmp64
= tcg_temp_new_i64();
9911 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9912 tcg_temp_free_i32(tmp
);
9914 /* Signed 64-bit multiply */
9915 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9920 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9921 tcg_temp_free_i64(tmp64
);
9924 gen_addq_lo(s
, tmp64
, rs
);
9925 gen_addq_lo(s
, tmp64
, rd
);
9926 } else if (op
& 0x40) {
9927 /* 64-bit accumulate. */
9928 gen_addq(s
, tmp64
, rs
, rd
);
9930 gen_storeq_reg(s
, rs
, rd
, tmp64
);
9931 tcg_temp_free_i64(tmp64
);
9936 case 6: case 7: case 14: case 15:
9938 if (((insn
>> 24) & 3) == 3) {
9939 /* Translate into the equivalent ARM encoding. */
9940 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
9941 if (disas_neon_data_insn(s
, insn
)) {
9944 } else if (((insn
>> 8) & 0xe) == 10) {
9945 if (disas_vfp_insn(s
, insn
)) {
9949 if (insn
& (1 << 28))
9951 if (disas_coproc_insn(s
, insn
)) {
9956 case 8: case 9: case 10: case 11:
9957 if (insn
& (1 << 15)) {
9958 /* Branches, misc control. */
9959 if (insn
& 0x5000) {
9960 /* Unconditional branch. */
9961 /* signextend(hw1[10:0]) -> offset[:12]. */
9962 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
9963 /* hw1[10:0] -> offset[11:1]. */
9964 offset
|= (insn
& 0x7ff) << 1;
9965 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9966 offset[24:22] already have the same value because of the
9967 sign extension above. */
9968 offset
^= ((~insn
) & (1 << 13)) << 10;
9969 offset
^= ((~insn
) & (1 << 11)) << 11;
9971 if (insn
& (1 << 14)) {
9972 /* Branch and link. */
9973 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
9977 if (insn
& (1 << 12)) {
9982 offset
&= ~(uint32_t)2;
9983 /* thumb2 bx, no need to check */
9984 gen_bx_im(s
, offset
);
9986 } else if (((insn
>> 23) & 7) == 7) {
9988 if (insn
& (1 << 13))
9991 if (insn
& (1 << 26)) {
9992 if (!(insn
& (1 << 20))) {
9993 /* Hypervisor call (v7) */
9994 int imm16
= extract32(insn
, 16, 4) << 12
9995 | extract32(insn
, 0, 12);
10002 /* Secure monitor call (v6+) */
10010 op
= (insn
>> 20) & 7;
10012 case 0: /* msr cpsr. */
10013 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10014 tmp
= load_reg(s
, rn
);
10015 addr
= tcg_const_i32(insn
& 0xff);
10016 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
10017 tcg_temp_free_i32(addr
);
10018 tcg_temp_free_i32(tmp
);
10023 case 1: /* msr spsr. */
10024 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10027 tmp
= load_reg(s
, rn
);
10029 msr_mask(s
, (insn
>> 8) & 0xf, op
== 1),
10033 case 2: /* cps, nop-hint. */
10034 if (((insn
>> 8) & 7) == 0) {
10035 gen_nop_hint(s
, insn
& 0xff);
10037 /* Implemented as NOP in user mode. */
10042 if (insn
& (1 << 10)) {
10043 if (insn
& (1 << 7))
10045 if (insn
& (1 << 6))
10047 if (insn
& (1 << 5))
10049 if (insn
& (1 << 9))
10050 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
10052 if (insn
& (1 << 8)) {
10054 imm
|= (insn
& 0x1f);
10057 gen_set_psr_im(s
, offset
, 0, imm
);
10060 case 3: /* Special control operations. */
10062 op
= (insn
>> 4) & 0xf;
10064 case 2: /* clrex */
10069 /* These execute as NOPs. */
10072 /* We need to break the TB after this insn
10073 * to execute self-modifying code correctly
10074 * and also to take any pending interrupts
10084 /* Trivial implementation equivalent to bx. */
10085 tmp
= load_reg(s
, rn
);
10088 case 5: /* Exception return. */
10092 if (rn
!= 14 || rd
!= 15) {
10095 tmp
= load_reg(s
, rn
);
10096 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
10097 gen_exception_return(s
, tmp
);
10099 case 6: /* mrs cpsr. */
10100 tmp
= tcg_temp_new_i32();
10101 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10102 addr
= tcg_const_i32(insn
& 0xff);
10103 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
10104 tcg_temp_free_i32(addr
);
10106 gen_helper_cpsr_read(tmp
, cpu_env
);
10108 store_reg(s
, rd
, tmp
);
10110 case 7: /* mrs spsr. */
10111 /* Not accessible in user mode. */
10112 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
10115 tmp
= load_cpu_field(spsr
);
10116 store_reg(s
, rd
, tmp
);
10121 /* Conditional branch. */
10122 op
= (insn
>> 22) & 0xf;
10123 /* Generate a conditional jump to next instruction. */
10124 s
->condlabel
= gen_new_label();
10125 arm_gen_test_cc(op
^ 1, s
->condlabel
);
10128 /* offset[11:1] = insn[10:0] */
10129 offset
= (insn
& 0x7ff) << 1;
10130 /* offset[17:12] = insn[21:16]. */
10131 offset
|= (insn
& 0x003f0000) >> 4;
10132 /* offset[31:20] = insn[26]. */
10133 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
10134 /* offset[18] = insn[13]. */
10135 offset
|= (insn
& (1 << 13)) << 5;
10136 /* offset[19] = insn[11]. */
10137 offset
|= (insn
& (1 << 11)) << 8;
10139 /* jump to the offset */
10140 gen_jmp(s
, s
->pc
+ offset
);
10143 /* Data processing immediate. */
10144 if (insn
& (1 << 25)) {
10145 if (insn
& (1 << 24)) {
10146 if (insn
& (1 << 20))
10148 /* Bitfield/Saturate. */
10149 op
= (insn
>> 21) & 7;
10151 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10153 tmp
= tcg_temp_new_i32();
10154 tcg_gen_movi_i32(tmp
, 0);
10156 tmp
= load_reg(s
, rn
);
10159 case 2: /* Signed bitfield extract. */
10161 if (shift
+ imm
> 32)
10164 gen_sbfx(tmp
, shift
, imm
);
10166 case 6: /* Unsigned bitfield extract. */
10168 if (shift
+ imm
> 32)
10171 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
10173 case 3: /* Bitfield insert/clear. */
10176 imm
= imm
+ 1 - shift
;
10178 tmp2
= load_reg(s
, rd
);
10179 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
10180 tcg_temp_free_i32(tmp2
);
10185 default: /* Saturate. */
10188 tcg_gen_sari_i32(tmp
, tmp
, shift
);
10190 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10192 tmp2
= tcg_const_i32(imm
);
10195 if ((op
& 1) && shift
== 0) {
10196 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10197 tcg_temp_free_i32(tmp
);
10198 tcg_temp_free_i32(tmp2
);
10201 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
10203 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
10207 if ((op
& 1) && shift
== 0) {
10208 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10209 tcg_temp_free_i32(tmp
);
10210 tcg_temp_free_i32(tmp2
);
10213 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
10215 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
10218 tcg_temp_free_i32(tmp2
);
10221 store_reg(s
, rd
, tmp
);
10223 imm
= ((insn
& 0x04000000) >> 15)
10224 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
10225 if (insn
& (1 << 22)) {
10226 /* 16-bit immediate. */
10227 imm
|= (insn
>> 4) & 0xf000;
10228 if (insn
& (1 << 23)) {
10230 tmp
= load_reg(s
, rd
);
10231 tcg_gen_ext16u_i32(tmp
, tmp
);
10232 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
10235 tmp
= tcg_temp_new_i32();
10236 tcg_gen_movi_i32(tmp
, imm
);
10239 /* Add/sub 12-bit immediate. */
10241 offset
= s
->pc
& ~(uint32_t)3;
10242 if (insn
& (1 << 23))
10246 tmp
= tcg_temp_new_i32();
10247 tcg_gen_movi_i32(tmp
, offset
);
10249 tmp
= load_reg(s
, rn
);
10250 if (insn
& (1 << 23))
10251 tcg_gen_subi_i32(tmp
, tmp
, imm
);
10253 tcg_gen_addi_i32(tmp
, tmp
, imm
);
10256 store_reg(s
, rd
, tmp
);
10259 int shifter_out
= 0;
10260 /* modified 12-bit immediate. */
10261 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
10262 imm
= (insn
& 0xff);
10265 /* Nothing to do. */
10267 case 1: /* 00XY00XY */
10270 case 2: /* XY00XY00 */
10274 case 3: /* XYXYXYXY */
10278 default: /* Rotated constant. */
10279 shift
= (shift
<< 1) | (imm
>> 7);
10281 imm
= imm
<< (32 - shift
);
10285 tmp2
= tcg_temp_new_i32();
10286 tcg_gen_movi_i32(tmp2
, imm
);
10287 rn
= (insn
>> 16) & 0xf;
10289 tmp
= tcg_temp_new_i32();
10290 tcg_gen_movi_i32(tmp
, 0);
10292 tmp
= load_reg(s
, rn
);
10294 op
= (insn
>> 21) & 0xf;
10295 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
10296 shifter_out
, tmp
, tmp2
))
10298 tcg_temp_free_i32(tmp2
);
10299 rd
= (insn
>> 8) & 0xf;
10301 store_reg(s
, rd
, tmp
);
10303 tcg_temp_free_i32(tmp
);
10308 case 12: /* Load/store single data item. */
10313 if ((insn
& 0x01100000) == 0x01000000) {
10314 if (disas_neon_ls_insn(s
, insn
)) {
10319 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
10321 if (!(insn
& (1 << 20))) {
10325 /* Byte or halfword load space with dest == r15 : memory hints.
10326 * Catch them early so we don't emit pointless addressing code.
10327 * This space is a mix of:
10328 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10329 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10331 * unallocated hints, which must be treated as NOPs
10332 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10333 * which is easiest for the decoding logic
10334 * Some space which must UNDEF
10336 int op1
= (insn
>> 23) & 3;
10337 int op2
= (insn
>> 6) & 0x3f;
10342 /* UNPREDICTABLE, unallocated hint or
10343 * PLD/PLDW/PLI (literal)
10348 return 0; /* PLD/PLDW/PLI or unallocated hint */
10350 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
10351 return 0; /* PLD/PLDW/PLI or unallocated hint */
10353 /* UNDEF space, or an UNPREDICTABLE */
10357 memidx
= get_mem_index(s
);
10359 addr
= tcg_temp_new_i32();
10361 /* s->pc has already been incremented by 4. */
10362 imm
= s
->pc
& 0xfffffffc;
10363 if (insn
& (1 << 23))
10364 imm
+= insn
& 0xfff;
10366 imm
-= insn
& 0xfff;
10367 tcg_gen_movi_i32(addr
, imm
);
10369 addr
= load_reg(s
, rn
);
10370 if (insn
& (1 << 23)) {
10371 /* Positive offset. */
10372 imm
= insn
& 0xfff;
10373 tcg_gen_addi_i32(addr
, addr
, imm
);
10376 switch ((insn
>> 8) & 0xf) {
10377 case 0x0: /* Shifted Register. */
10378 shift
= (insn
>> 4) & 0xf;
10380 tcg_temp_free_i32(addr
);
10383 tmp
= load_reg(s
, rm
);
10385 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10386 tcg_gen_add_i32(addr
, addr
, tmp
);
10387 tcg_temp_free_i32(tmp
);
10389 case 0xc: /* Negative offset. */
10390 tcg_gen_addi_i32(addr
, addr
, -imm
);
10392 case 0xe: /* User privilege. */
10393 tcg_gen_addi_i32(addr
, addr
, imm
);
10394 memidx
= get_a32_user_mem_index(s
);
10396 case 0x9: /* Post-decrement. */
10398 /* Fall through. */
10399 case 0xb: /* Post-increment. */
10403 case 0xd: /* Pre-decrement. */
10405 /* Fall through. */
10406 case 0xf: /* Pre-increment. */
10407 tcg_gen_addi_i32(addr
, addr
, imm
);
10411 tcg_temp_free_i32(addr
);
10416 if (insn
& (1 << 20)) {
10418 tmp
= tcg_temp_new_i32();
10421 gen_aa32_ld8u(tmp
, addr
, memidx
);
10424 gen_aa32_ld8s(tmp
, addr
, memidx
);
10427 gen_aa32_ld16u(tmp
, addr
, memidx
);
10430 gen_aa32_ld16s(tmp
, addr
, memidx
);
10433 gen_aa32_ld32u(tmp
, addr
, memidx
);
10436 tcg_temp_free_i32(tmp
);
10437 tcg_temp_free_i32(addr
);
10443 store_reg(s
, rs
, tmp
);
10447 tmp
= load_reg(s
, rs
);
10450 gen_aa32_st8(tmp
, addr
, memidx
);
10453 gen_aa32_st16(tmp
, addr
, memidx
);
10456 gen_aa32_st32(tmp
, addr
, memidx
);
10459 tcg_temp_free_i32(tmp
);
10460 tcg_temp_free_i32(addr
);
10463 tcg_temp_free_i32(tmp
);
10466 tcg_gen_addi_i32(addr
, addr
, imm
);
10468 store_reg(s
, rn
, addr
);
10470 tcg_temp_free_i32(addr
);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {   /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;
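    /* 16-bit Thumb decode: dispatch on the top four bits of the instruction,
     * which select the major format group (shift/add/sub immediate, large
     * immediates, data-processing, loads/stores, branches and so on).
     */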
    switch (insn >> 12) {
10507 op
= (insn
>> 11) & 3;
10510 rn
= (insn
>> 3) & 7;
10511 tmp
= load_reg(s
, rn
);
10512 if (insn
& (1 << 10)) {
10514 tmp2
= tcg_temp_new_i32();
10515 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
10518 rm
= (insn
>> 6) & 7;
10519 tmp2
= load_reg(s
, rm
);
10521 if (insn
& (1 << 9)) {
10522 if (s
->condexec_mask
)
10523 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10525 gen_sub_CC(tmp
, tmp
, tmp2
);
10527 if (s
->condexec_mask
)
10528 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10530 gen_add_CC(tmp
, tmp
, tmp2
);
10532 tcg_temp_free_i32(tmp2
);
10533 store_reg(s
, rd
, tmp
);
10535 /* shift immediate */
10536 rm
= (insn
>> 3) & 7;
10537 shift
= (insn
>> 6) & 0x1f;
10538 tmp
= load_reg(s
, rm
);
10539 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
10540 if (!s
->condexec_mask
)
10542 store_reg(s
, rd
, tmp
);
10546 /* arithmetic large immediate */
10547 op
= (insn
>> 11) & 3;
10548 rd
= (insn
>> 8) & 0x7;
10549 if (op
== 0) { /* mov */
10550 tmp
= tcg_temp_new_i32();
10551 tcg_gen_movi_i32(tmp
, insn
& 0xff);
10552 if (!s
->condexec_mask
)
10554 store_reg(s
, rd
, tmp
);
10556 tmp
= load_reg(s
, rd
);
10557 tmp2
= tcg_temp_new_i32();
10558 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
10561 gen_sub_CC(tmp
, tmp
, tmp2
);
10562 tcg_temp_free_i32(tmp
);
10563 tcg_temp_free_i32(tmp2
);
10566 if (s
->condexec_mask
)
10567 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10569 gen_add_CC(tmp
, tmp
, tmp2
);
10570 tcg_temp_free_i32(tmp2
);
10571 store_reg(s
, rd
, tmp
);
10574 if (s
->condexec_mask
)
10575 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10577 gen_sub_CC(tmp
, tmp
, tmp2
);
10578 tcg_temp_free_i32(tmp2
);
10579 store_reg(s
, rd
, tmp
);
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored. */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
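        /* In the ALU group above, "val" records whether the result lives in
         * tmp2 (the shift/rotate/mvn forms, written back to rm) or in tmp
         * (written back to rd), while rd == 16 marks the compare-only ops
         * (tst/cmp/cmn) that update flags without writing a register.
         */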
    case 5:
        /* load/store register offset. */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0:
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            break;
        case 1:
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            break;
        case 2:
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, get_mem_index(s));
            break;
        case 4:
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            break;
        case 5:
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            break;
        case 6:
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, get_mem_index(s));
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
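        /* SP-relative loads/stores above scale the 8-bit immediate by 4,
         * giving a 0-1020 byte word-aligned offset from r13.
         */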
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend. */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;
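            /* Example for the push/pop sequence above: "push {r0, r4, lr}"
             * has three registers in the list, so offset is computed as 12
             * and the stack pointer is pre-decremented by 12 before the
             * stores are emitted in ascending address order.
             */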
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state. */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
            break;
        }
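        /* The bkpt case reports a 2-byte instruction length to
         * gen_exception_insn() and folds the 8-bit comment field into the
         * syndrome via syn_aa32_bkpt().
         */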
        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;
    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
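    /* For the block transfer above: if the base register is also in the
     * register list, its loaded value is parked in loaded_var and only
     * written back after every memory access has been emitted, which keeps
     * the base register unchanged until the whole transfer has been issued.
     */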
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
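        /* Branch offset arithmetic above: the 8-bit immediate is
         * sign-extended by the ((int32_t)insn << 24) >> 24 idiom and then
         * doubled, so e.g. an encoded 0xfe yields -2 halfwords, i.e. a
         * branch back to the instruction itself relative to pc + 2.
         */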
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
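/* Note the two undefined-instruction exits above: undef32 reports a 4-byte
 * instruction length for failed 32-bit Thumb encodings, while illegal_op/
 * undef report a 2-byte length for 16-bit ones; both use an uncategorized
 * syndrome.
 */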
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     */
    uint16_t insn;

    if ((s->pc & 3) == 0) {
        /* At a 4-aligned address we can't be crossing a page */
        return false;
    }

    /* This must be a Thumb insn */
    insn = arm_lduw_code(env, s->pc, s->bswap_code);

    if ((insn >> 11) >= 0x1d) {
        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
         * First half of a 32-bit Thumb insn. Thumb-1 cores might
         * end up actually treating this as two 16-bit insns (see the
         * code at the start of disas_thumb2_insn()) but we don't bother
         * to check for that as it is unlikely, and false positives here
         * are harmless.
         */
        return true;
    }
    /* Definitely a 16-bit insn, can't be crossing a page. */
    return false;
}
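/* Example: a halfword in the last two bytes of a page whose top five bits
 * are 0b11101 or higher (i.e. values >= 0xe800) is the first half of a
 * 32-bit Thumb insn, so its second half lies on the next page and the
 * function returns true.
 */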
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;
    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
        num_insns++;
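        /* The second argument to tcg_gen_insn_start() packs the IT state
         * back into the CPUARMState condexec_bits layout: dc->condexec_cond
         * supplies bits [7:4] and dc->condexec_mask (which is kept
         * pre-shifted left by one in the DisasContext) supplies bits [3:0].
         * restore_state_to_opc() at the end of this file consumes the same
         * packed value.
         */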
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
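        /* The condexec update after disas_thumb_insn() steps the IT state:
         * the mask shifts left one bit per instruction, its old bit 4
         * becomes the low bit of the condition for the next instruction,
         * and once the mask reaches zero the IT block is finished and the
         * condition is cleared.
         */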
        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                              default_exception_el(dc));
            } else if (dc->is_jmp == DISAS_HVC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            } else if (dc->is_jmp == DISAS_SMC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            } else if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || dc->is_jmp == DISAS_NEXT ||
            dc->is_jmp == DISAS_UPDATE) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
        } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
        } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
        } else if (dc->ss_active) {
            gen_step_complete_exception(dc);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception_internal(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
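/* Everything that falls out of the decode loop funnels through the is_jmp
 * switch above: DISAS_NEXT chains directly to the following TB via
 * gen_goto_tb(), while the WFI/SWI/HVC/SMC and update cases either raise
 * the exception here or exit to the main loop with tcg_gen_exit_tb(0).
 */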

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;
    const char *ns_status;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        (psr & CPSR_M) != ARM_CPU_MODE_MON) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                ns_status,
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
    }
}
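/* The data[] layout consumed here mirrors what gen_intermediate_code passes
 * to tcg_gen_insn_start(): data[0] is the PC and data[1] the packed
 * condexec (IT-state) bits, which are only meaningful for AArch32.
 */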