/*
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/bitops.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do {  if (!ENABLE_ARCH_##x) goto illegal_op;  } while(0)
#include "translate.h"

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
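
/* Note: the globals registered above alias fields of CPUARMState directly,
 * so generated TCG code reads and writes the guest registers and flag
 * fields in place; TCG itself takes care of spilling them around helper
 * calls.  (Descriptive comment, not present in the original source.)
 */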
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
    s->is_jmp = DISAS_EXC;
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
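
/* On return, 'a' holds the signed product of the two low halfwords and 'b'
 * the signed product of the two high halfwords; callers add or subtract the
 * pair for the SMUAD/SMUSD-style dual-multiply operations.
 */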
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
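
/* The xori/subi pair above is the usual branch-free sign extension: after
 * masking to 'width' bits, XORing with the sign bit and then subtracting it
 * leaves values below 2^(width-1) unchanged and subtracts 2^width from
 * values that had the sign bit set.
 */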
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
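
/* Two strategies above: when the TCG backend has a native add2 op we chain
 * two 32-bit adds with carry-out; otherwise we widen both operands to 64
 * bits, add, and split the result back into the N flag (low word) and the
 * C flag (high word).
 */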
/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
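
/* Note the immediate encodings: for LSR and ASR a shift field of 0 encodes
 * a shift by 32, and for ROR a shift of 0 encodes RRX (rotate right by one
 * through the carry flag), which is why shift == 0 is special-cased above.
 */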
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
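
/* Typical use in a decoder (illustrative only): skip a conditional insn
 * whose condition fails by branching over the generated body:
 *
 *     int label = gen_new_label();
 *     arm_gen_test_cc(cond ^ 1, label);   // branch if cond does NOT hold
 *     ... generate the insn body ...
 *     gen_set_label(label);
 */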
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC);                          \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_st_i32(val, addr, index, OPC);                          \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
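
/* Usage mirrors the call sites later in this file, e.g. loading a word at
 * the guest address in 'addr' for the current mmu index (illustrative):
 *
 *     tmp = tcg_temp_new_i32();
 *     gen_aa32_ld32u(tmp, addr, get_mem_index(s));
 */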
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
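
/* Neon operations use the "standard FPSCR" behaviour (flush-to-zero,
 * default NaN), so they get their own float_status; VFP operations honour
 * the application FPSCR and use the normal one.
 */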
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)
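
/* Bit 20 of a coprocessor instruction is the L (direction) bit: set for
 * transfers from the coprocessor to the ARM core (e.g. TMRRC below), clear
 * for the opposite direction (e.g. TMCRR).
 */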
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
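
/* Each invocation above expands into a gen_op_iwmmxt_<op>{b,w,l,q}_M0_wRn()
 * (or, for the ENV1 forms, gen_op_iwmmxt_<op>_M0()) helper that combines
 * the named wRn register into the M0 working register; the decoder below
 * is written entirely in terms of these helpers.
 */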
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
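
/* The 8-bit offset field is scaled by the instruction's size flag:
 * (insn >> 7) & 2 shifts it left by 0 or 2, and bits 24, 23 and 21 select
 * pre- vs post-indexing, the offset sign, and writeback respectively,
 * matching the usual ARM coprocessor addressing modes.
 */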
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
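
/* Shift amounts may come either from one of the wCGR0-wCGR3 general
 * registers (bit 8 set) or from the bottom of wRd; in both cases only the
 * bits covered by 'mask' are significant.
 */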
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {             /* WLDRH */
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    } else {                            /* WLDRB */
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;
    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
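
/* Illustrative decode: with VFP3 (32 double registers), VFP_DREG_D
 * combines insn bits [15:12] with bit 22 as the high bit, so bits
 * [15:12] = 0x3 plus a set bit 22 selects d19; without VFP3 a set
 * "small" bit makes the encoding UNDEF, since only d0..d15 exist.
 * For single registers the small bit is the *low* bit instead:
 * VFP_SREG_D with bits [15:12] = 0x3 and bit 22 set selects s7.
 */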

/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
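
/* Worked example: gen_neon_dup_u8 with var = 0x000000ab and shift = 0
 * computes 0xab -> 0xabab -> 0xabababab, replicating the byte to all
 * four lanes; gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd,
 * and gen_neon_dup_high16 turns 0xabcd0000 into the same value.
 */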

static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}

static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
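
/* Note on the cc field above: VSEL encodes only the conditions EQ, VS,
 * GE and GT (cc = 0..3); the inverted forms (NE, VC, LT, LE) are
 * obtained by the assembler swapping the two source operands, so no
 * other tests need to be generated here.
 */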

static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}

static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
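
/* The second set_rmode call above is what restores the previous FPSCR
 * rounding mode: the helper returns the old mode in its destination,
 * so after the first call tcg_rmode holds the saved mode, and passing
 * it back swaps the original mode into place again.
 */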

static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}

/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
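
/* For example, VRINTN has RM = 0b01 in insn bits [17:16], which indexes
 * to FPROUNDING_TIEEVEN here, while VRINTA (RM = 0b00) maps to
 * FPROUNDING_TIEAWAY; the same table serves the VCVT{A,N,P,M} family.
 */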

static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}

/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }
    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(env, s, insn);
    }

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        /* Single precision register */
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3) {
                veclen = 0;
            }

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  These are stored in F0 and F1.  */
            switch (op) {
            case 15:
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
                break;
            default:
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
                break;
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}

static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(0);
    }
}
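
/* When the branch target lies in the same guest page as the current TB,
 * the generated code chains directly to the next TB: exit_tb returns the
 * TB pointer with the jump-slot number n in the low bits so the main
 * loop can patch the direct jump.  Cross-page branches just update the
 * PC and return 0, forcing a fresh TB lookup.
 */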

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled || s->ss_active)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
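
/* TCG has no 16x16->32 signed multiply, so each operand is first reduced
 * to a sign-extended halfword: x and y select the top half (arithmetic
 * shift right by 16) or the bottom half (sign-extend) of t0 and t1.
 * For example SMULTB uses the top half of the first operand and the
 * bottom half of the second.
 */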

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
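
/* Example: an MSR with a field mask of 0b1001 selects the control byte
 * and the flags byte, giving mask = 0xff0000ff before the feature and
 * privilege filtering; in user mode everything except the CPSR_USER
 * bits (condition flags and, where present, Q and GE) is then stripped.
 */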

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, CPSR_ERET_MASK);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, CPSR_ERET_MASK);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
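
/* For instance, GEN_NEON_INTEGER_OP(hadd) with size == 1 and u == 0
 * expands to gen_helper_neon_hadd_s16(tmp, tmp, tmp2): the
 * (size << 1) | u index selects the signed or unsigned helper for the
 * element width, and unallocated combinations make the enclosing
 * decoder return 1 (UNDEF).
 */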

static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}

static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
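
/* Per-bit example: with c = 0xff00ff00, t = 0x11223344, f = 0xaabbccdd,
 * the result is (t & c) | (f & ~c) = 0x11bb33dd; each set bit in the
 * control value selects the corresponding bit of t, each clear bit
 * selects the bit of f.
 */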

static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
4838 #define NEON_3R_VHADD 0
4839 #define NEON_3R_VQADD 1
4840 #define NEON_3R_VRHADD 2
4841 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4842 #define NEON_3R_VHSUB 4
4843 #define NEON_3R_VQSUB 5
4844 #define NEON_3R_VCGT 6
4845 #define NEON_3R_VCGE 7
4846 #define NEON_3R_VSHL 8
4847 #define NEON_3R_VQSHL 9
4848 #define NEON_3R_VRSHL 10
4849 #define NEON_3R_VQRSHL 11
4850 #define NEON_3R_VMAX 12
4851 #define NEON_3R_VMIN 13
4852 #define NEON_3R_VABD 14
4853 #define NEON_3R_VABA 15
4854 #define NEON_3R_VADD_VSUB 16
4855 #define NEON_3R_VTST_VCEQ 17
4856 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4857 #define NEON_3R_VMUL 19
4858 #define NEON_3R_VPMAX 20
4859 #define NEON_3R_VPMIN 21
4860 #define NEON_3R_VQDMULH_VQRDMULH 22
4861 #define NEON_3R_VPADD 23
4862 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4863 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4864 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4865 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4866 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4867 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4868 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4869 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
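
/* Each entry is a bitmask of the size values an op accepts: bit n set
 * means size == n is valid.  For example NEON_3R_VQDMULH_VQRDMULH is
 * 0x6, so only 16-bit (size 1) and 32-bit (size 2) elements are
 * allowed and a size-0 encoding falls straight through to UNDEF.
 */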

/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
4910 #define NEON_2RM_VREV64 0
4911 #define NEON_2RM_VREV32 1
4912 #define NEON_2RM_VREV16 2
4913 #define NEON_2RM_VPADDL 4
4914 #define NEON_2RM_VPADDL_U 5
4915 #define NEON_2RM_AESE 6 /* Includes AESD */
4916 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4917 #define NEON_2RM_VCLS 8
4918 #define NEON_2RM_VCLZ 9
4919 #define NEON_2RM_VCNT 10
4920 #define NEON_2RM_VMVN 11
4921 #define NEON_2RM_VPADAL 12
4922 #define NEON_2RM_VPADAL_U 13
4923 #define NEON_2RM_VQABS 14
4924 #define NEON_2RM_VQNEG 15
4925 #define NEON_2RM_VCGT0 16
4926 #define NEON_2RM_VCGE0 17
4927 #define NEON_2RM_VCEQ0 18
4928 #define NEON_2RM_VCLE0 19
4929 #define NEON_2RM_VCLT0 20
4930 #define NEON_2RM_SHA1H 21
4931 #define NEON_2RM_VABS 22
4932 #define NEON_2RM_VNEG 23
4933 #define NEON_2RM_VCGT0_F 24
4934 #define NEON_2RM_VCGE0_F 25
4935 #define NEON_2RM_VCEQ0_F 26
4936 #define NEON_2RM_VCLE0_F 27
4937 #define NEON_2RM_VCLT0_F 28
4938 #define NEON_2RM_VABS_F 30
4939 #define NEON_2RM_VNEG_F 31
4940 #define NEON_2RM_VSWP 32
4941 #define NEON_2RM_VTRN 33
4942 #define NEON_2RM_VUZP 34
4943 #define NEON_2RM_VZIP 35
4944 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4945 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4946 #define NEON_2RM_VSHLL 38
4947 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4948 #define NEON_2RM_VRINTN 40
4949 #define NEON_2RM_VRINTX 41
4950 #define NEON_2RM_VRINTA 42
4951 #define NEON_2RM_VRINTZ 43
4952 #define NEON_2RM_VCVT_F16_F32 44
4953 #define NEON_2RM_VRINTM 45
4954 #define NEON_2RM_VCVT_F32_F16 46
4955 #define NEON_2RM_VRINTP 47
4956 #define NEON_2RM_VCVTAU 48
4957 #define NEON_2RM_VCVTAS 49
4958 #define NEON_2RM_VCVTNU 50
4959 #define NEON_2RM_VCVTNS 51
4960 #define NEON_2RM_VCVTPU 52
4961 #define NEON_2RM_VCVTPS 53
4962 #define NEON_2RM_VCVTMU 54
4963 #define NEON_2RM_VCVTMS 55
4964 #define NEON_2RM_VRECPE 56
4965 #define NEON_2RM_VRSQRTE 57
4966 #define NEON_2RM_VRECPE_F 58
4967 #define NEON_2RM_VRSQRTE_F 59
4968 #define NEON_2RM_VCVT_FS 60
4969 #define NEON_2RM_VCVT_FU 61
4970 #define NEON_2RM_VCVT_SF 62
4971 #define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}
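/* Ops classified as float-to-float by this predicate are routed through
 * the scalar temporary cpu_F0s rather than the integer temps; see the
 * elementwise loop of the 2-register-misc path in disas_neon_data_insn().
 */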
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */
static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;

    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        /*
         * The SHA-1/SHA-256 3-register instructions require special treatment
         * here, as their size field is overloaded as an op type selector, and
         * they all consume their input in a single pass.
         */
        if (op == NEON_3R_SHA) {
            if (q != 1) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
                    return 1;
                }
                tmp = tcg_const_i32(rd);
                tmp2 = tcg_const_i32(rn);
                tmp3 = tcg_const_i32(rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
                    return 1;
                }
                tmp = tcg_const_i32(rd);
                tmp2 = tcg_const_i32(rn);
                tmp3 = tcg_const_i32(rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
                    break;
                }
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp3);
            return 0;
        }
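        /* Note: the crypto helpers above consume whole 128-bit registers in
         * a single call, which is why this path returns here rather than
         * falling into the per-pass loops used by the other 3-reg-same ops.
         */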
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VADD_VSUB:
                    if (u) { /* VSUB */
                        tcg_gen_sub_i64(CPU_V001);
                    } else { /* VADD */
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
        {
            int rtmp;
            /* Shift instruction operands are reversed.  */
            rtmp = rn;
            rn = rm;
            rm = rtmp;
        }
            break;
        case NEON_3R_VPADD:
            if (u) {
                return 1;
            }
            /* Fall through */
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VMUL:
            if (u && (size != 0)) {
                /* UNDEF on invalid size for polynomial subcase */
                return 1;
            }
            break;
        case NEON_3R_VFM:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
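            /* For pairwise ops the two passes consume the sources in
             * sequence: pass 0 reads both 32-bit halves of rn, pass 1
             * both halves of rm, and each pass pairs adjacent elements.
             */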
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VQADD:
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_LOGIC: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    tcg_gen_and_i32(tmp, tmp, tmp2);
                    break;
                case 1: /* VBIC */
                    tcg_gen_andc_i32(tmp, tmp, tmp2);
                    break;
                case 2: /* VORR */
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    break;
                case 3: /* VORN */
                    tcg_gen_orc_i32(tmp, tmp, tmp2);
                    break;
                case 4: /* VEOR */
                    tcg_gen_xor_i32(tmp, tmp, tmp2);
                    break;
                case 5: /* VBSL */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                    tcg_temp_free_i32(tmp3);
                    break;
                case 6: /* VBIT */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                case 7: /* VBIF */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VQSUB:
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case NEON_3R_VCGT:
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case NEON_3R_VCGE:
                GEN_NEON_INTEGER_OP(cge);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VMAX:
                GEN_NEON_INTEGER_OP(max);
                break;
            case NEON_3R_VMIN:
                GEN_NEON_INTEGER_OP(min);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VADD_VSUB:
                if (!u) { /* VADD */
                    gen_neon_add(size, tmp, tmp2);
                } else { /* VSUB */
                    switch (size) {
                    case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VTST_VCEQ:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size, tmp, tmp2);
                } else { /* VMLA */
                    gen_neon_add(size, tmp, tmp2);
                }
                break;
            case NEON_3R_VMUL:
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM in ARMv8 */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);
            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }
        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
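        /* Concrete example of the clobber hazard handled above: for
         * VPADD d0, d1, d0 (rd == rm), storing the pass 0 result straight
         * into d0 would corrupt the second source before pass 1 reads it,
         * hence the detour through the scratch buffer.
         */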
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift. */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }
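                /* Worked example: for 8-bit elements (size == 0, N == 8),
                 * VSHR #3 is encoded with an immediate of N - 3 = 5; the
                 * line above turns that into shift = 5 - 8 = -3, and the
                 * variable shift helpers treat the negative amount as a
                 * right shift by 3.
                 */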
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            neon_load_reg64(cpu_V1, rd + pass);
                            uint64_t mask;
                            if (shift < -63 || shift > 63) {
                                mask = 0;
                            } else {
                                if (op == 4) {
                                    mask = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;
            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert) {
                imm = ~imm;
            }
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = tcg_temp_new_i32();
                    if (op == 14 && invert) {
                        int n;
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
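            /* Example of the op == 14 && invert expansion above: each bit n
             * of the (already inverted) immediate selects a whole byte of
             * the result, so a nibble of 0b0101 for this pass expands to
             * the 32-bit value 0x00ff00ff.
             */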
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size == 0
                 *           bit 1 : UNDEF if size == 1
                 *           bit 2 : UNDEF if size == 2
                 *           bit 3 : UNDEF if U == 1
                 * Note that [2:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 9}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 9}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 1}, /* VQDMULL */
                    {0, 0, 0, 0xa}, /* Polynomial VMULL */
                    {0, 0, 0, 7}, /* Reserved: always UNDEF */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];
                undefreq = neon_3reg_wide[op][3];

                if ((undefreq & (1 << size)) ||
                    ((undefreq & 8) && u)) {
                    return 1;
                }
                if ((src1_wide && (rn & 1)) ||
                    (src2_wide && (rm & 1)) ||
                    (!src2_wide && (rd & 1))) {
                    return 1;
                }
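                /* Reading the undefreq column: VQDMLAL's value of 9 (binary
                 * 1001) means it UNDEFs for size == 0 and for U == 1, while
                 * the reserved row's 7 (binary 0111) rejects every size and
                 * so always UNDEFs.
                 */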
                /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
                 * outside the loop below as it only performs a single pass.
                 */
                if (op == 14 && size == 2) {
                    TCGv_i64 tcg_rn, tcg_rm, tcg_rd;

                    if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
                        return 1;
                    }
                    tcg_rn = tcg_temp_new_i64();
                    tcg_rm = tcg_temp_new_i64();
                    tcg_rd = tcg_temp_new_i64();
                    neon_load_reg64(tcg_rn, rn);
                    neon_load_reg64(tcg_rm, rm);
                    gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
                    neon_store_reg64(tcg_rd, rd);
                    gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
                    neon_store_reg64(tcg_rd, rd + 1);
                    tcg_temp_free_i64(tcg_rn);
                    tcg_temp_free_i64(tcg_rm);
                    tcg_temp_free_i64(tcg_rd);
                    return 0;
                }
                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                TCGV_UNUSED_I32(tmp3);
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        TCGV_UNUSED_I32(tmp);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        TCGV_UNUSED_I32(tmp2);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    default: /* 15 is RESERVED: caught earlier  */
                        abort();
                    }
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar. NB that for ops of this form
                 * the ARM ARM labels bit 24 as Q, but it is in our variable
                 * 'u', not 'q'.
                 */
                switch (op) {
                case 1: /* Float VMLA scalar */
                case 5: /* Floating point VMLS scalar */
                case 9: /* Floating point VMUL scalar */
                    if (size == 1) {
                        return 1;
                    }
                    /* fall through */
                case 0: /* Integer VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            default:
                                abort();
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 3: /* VQDMLAL scalar */
                case 7: /* VQDMLSL scalar */
                case 11: /* VQDMULL scalar */
                    if (u == 1) {
                        return 1;
                    }
                    /* fall through */
                case 2: /* VMLAL scalar */
                case 6: /* VMLSL scalar */
                case 10: /* VMULL scalar */
                    if (rd & 1) {
                        return 1;
                    }
                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;

                if (imm > 7 && !q) {
                    return 1;
                }

                if (q && ((rd | rn | rm) & 1)) {
                    return 1;
                }

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
                /* UNDEF for unknown op values and bad op-size combinations */
                if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
                    return 1;
                }
                if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
                    q && ((rm | rd) & 1)) {
                    return 1;
                }
                switch (op) {
                case NEON_2RM_VREV64:
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2 + 1, tmp);
                        if (size == 2) {
                            neon_store_reg(rd, pass * 2, tmp2);
                        } else {
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                            case 1: gen_swap_half(tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rd, pass * 2, tmp2);
                        }
                    }
                    break;
                case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
                case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= NEON_2RM_VPADAL) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case NEON_2RM_VTRN:
                    if (size == 2) {
                        int n;
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
                            tmp = neon_load_reg(rm, n);
                            tmp2 = neon_load_reg(rd, n + 1);
                            neon_store_reg(rm, n, tmp2);
                            neon_store_reg(rd, n + 1, tmp);
                        }
                    } else {
                        goto elementwise;
                    }
                    break;
                case NEON_2RM_VUZP:
                    if (gen_neon_unzip(rd, rm, size, q)) {
                        return 1;
                    }
                    break;
                case NEON_2RM_VZIP:
                    if (gen_neon_zip(rd, rm, size, q)) {
                        return 1;
                    }
                    break;
                case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
                    /* also VQMOVUN; op field and mnemonics don't line up */
                    if (rm & 1) {
                        return 1;
                    }
                    TCGV_UNUSED_I32(tmp2);
                    for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
                                           tmp, cpu_V0);
                        if (pass == 0) {
                            tmp2 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp2);
                            neon_store_reg(rd, 1, tmp);
                        }
                    }
                    break;
                case NEON_2RM_VSHLL:
                    if (q || (rd & 1)) {
                        return 1;
                    }
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 1)
                            tmp = tmp2;
                        gen_neon_widen(cpu_V0, tmp, size, 1);
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case NEON_2RM_VCVT_F16_F32:
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
                        q || (rm & 1)) {
                        return 1;
                    }
                    tmp = tcg_temp_new_i32();
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                    gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                    gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                    gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                    neon_store_reg(rd, 0, tmp2);
                    tmp2 = tcg_temp_new_i32();
                    gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    neon_store_reg(rd, 1, tmp2);
                    tcg_temp_free_i32(tmp);
                    break;
                case NEON_2RM_VCVT_F32_F16:
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
                        q || (rd & 1)) {
                        return 1;
                    }
                    tmp3 = tcg_temp_new_i32();
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    tcg_gen_ext16u_i32(tmp3, tmp);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_shri_i32(tmp3, tmp, 16);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_ext16u_i32(tmp3, tmp2);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                    tcg_gen_shri_i32(tmp3, tmp2, 16);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                case NEON_2RM_AESE: case NEON_2RM_AESMC:
                    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
                        || ((rm | rd) & 1)) {
                        return 1;
                    }
                    tmp = tcg_const_i32(rd);
                    tmp2 = tcg_const_i32(rm);

                    /* Bit 6 is the lowest opcode bit; it distinguishes between
                     * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
                     */
                    tmp3 = tcg_const_i32(extract32(insn, 6, 1));

                    if (op == NEON_2RM_AESE) {
                        gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
                    } else {
                        gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
                    }
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                case NEON_2RM_SHA1H:
                    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
                        || ((rm | rd) & 1)) {
                        return 1;
                    }
                    tmp = tcg_const_i32(rd);
                    tmp2 = tcg_const_i32(rm);

                    gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);

                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    break;
                case NEON_2RM_SHA1SU1:
                    if ((rm | rd) & 1) {
                        return 1;
                    }
                    /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
                    if (q) {
                        if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
                            return 1;
                        }
                    } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
                        return 1;
                    }
                    tmp = tcg_const_i32(rd);
                    tmp2 = tcg_const_i32(rm);
                    if (q) {
                        gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
                    } else {
                        gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
                    }
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    break;
                default:
                elementwise:
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
                        if (neon_2rm_is_float_op(op)) {
                            tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rm, pass));
                            TCGV_UNUSED_I32(tmp);
                        } else {
                            tmp = neon_load_reg(rm, pass);
                        }
                        switch (op) {
                        case NEON_2RM_VREV32:
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                            case 1: gen_swap_half(tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VREV16:
                            gen_rev16(tmp);
                            break;
                        case NEON_2RM_VCLS:
                            switch (size) {
                            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCLZ:
                            switch (size) {
                            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                            case 2: gen_helper_clz(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCNT:
                            gen_helper_neon_cnt_u8(tmp, tmp);
                            break;
                        case NEON_2RM_VMVN:
                            tcg_gen_not_i32(tmp, tmp);
                            break;
                        case NEON_2RM_VQABS:
                            switch (size) {
                            case 0:
                                gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
                                break;
                            case 1:
                                gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
                                break;
                            case 2:
                                gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
                                break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VQNEG:
                            switch (size) {
                            case 0:
                                gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
                                break;
                            case 1:
                                gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
                                break;
                            case 2:
                                gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
                                break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            tcg_temp_free_i32(tmp2);
                            if (op == NEON_2RM_VCLE0) {
                                tcg_gen_not_i32(tmp, tmp);
                            }
                            break;
                        case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            tcg_temp_free_i32(tmp2);
                            if (op == NEON_2RM_VCLT0) {
                                tcg_gen_not_i32(tmp, tmp);
                            }
                            break;
                        case NEON_2RM_VCEQ0:
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            tcg_temp_free_i32(tmp2);
                            break;
                        case NEON_2RM_VABS:
                            switch (size) {
                            case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                            case 2: tcg_gen_abs_i32(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VNEG:
                            tmp2 = tcg_const_i32(0);
                            gen_neon_rsb(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case NEON_2RM_VCGT0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCGE0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCEQ0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCLE0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCLT0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VABS_F:
                            gen_vfp_abs(0);
                            break;
                        case NEON_2RM_VNEG_F:
                            gen_vfp_neg(0);
                            break;
                        case NEON_2RM_VSWP:
                            tmp2 = neon_load_reg(rd, pass);
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case NEON_2RM_VTRN:
                            tmp2 = neon_load_reg(rd, pass);
                            switch (size) {
                            case 0: gen_neon_trn_u8(tmp, tmp2); break;
                            case 1: gen_neon_trn_u16(tmp, tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case NEON_2RM_VRINTN:
                        case NEON_2RM_VRINTA:
                        case NEON_2RM_VRINTM:
                        case NEON_2RM_VRINTP:
                        case NEON_2RM_VRINTZ:
                        {
                            TCGv_i32 tcg_rmode;
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            int rmode;

                            if (op == NEON_2RM_VRINTZ) {
                                rmode = FPROUNDING_ZERO;
                            } else {
                                rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
                            }

                            tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);
                            tcg_temp_free_ptr(fpstatus);
                            tcg_temp_free_i32(tcg_rmode);
                            break;
                        }
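                        /* Note the rounding-mode dance used here and in the
                         * VCVT cases below: gen_helper_set_neon_rmode writes
                         * the previous mode back into tcg_rmode, so the
                         * second call restores the guest's original rounding
                         * mode around the operation.
                         */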
                        case NEON_2RM_VRINTX:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVTAU:
                        case NEON_2RM_VCVTAS:
                        case NEON_2RM_VCVTNU:
                        case NEON_2RM_VCVTNS:
                        case NEON_2RM_VCVTPU:
                        case NEON_2RM_VCVTPS:
                        case NEON_2RM_VCVTMU:
                        case NEON_2RM_VCVTMS:
                        {
                            bool is_signed = !extract32(insn, 7, 1);
                            TCGv_ptr fpst = get_fpstatus_ptr(1);
                            TCGv_i32 tcg_rmode, tcg_shift;
                            int rmode = fp_decode_rm[extract32(insn, 8, 2)];

                            tcg_shift = tcg_const_i32(0);
                            tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);

                            if (is_signed) {
                                gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
                                                     tcg_shift, fpst);
                            } else {
                                gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
                                                     tcg_shift, fpst);
                            }

                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);
                            tcg_temp_free_i32(tcg_rmode);
                            tcg_temp_free_i32(tcg_shift);
                            tcg_temp_free_ptr(fpst);
                            break;
                        }
                        case NEON_2RM_VRECPE:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_recpe_u32(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VRSQRTE:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VRECPE_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VRSQRTE_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
                            gen_vfp_sito(0, 1);
                            break;
                        case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
                            gen_vfp_uito(0, 1);
                            break;
                        case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
                            gen_vfp_tosiz(0, 1);
                            break;
                        case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
                            gen_vfp_touiz(0, 1);
                            break;
                        default:
                            /* Reserved op values were caught by the
                             * neon_2rm_sizes[] check earlier.
                             */
                            abort();
                        }
                        if (neon_2rm_is_float_op(op)) {
                            tcg_gen_st_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rd, pass));
                        } else {
                            neon_store_reg(rd, pass, tmp);
                        }
                    }
                    break;
                }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                int n = ((insn >> 8) & 3) + 1;
                if ((rn + n) > 32) {
                    /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
                     * helper function running off the end of the register file.
                     */
                    return 1;
                }
                n <<= 3;
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 0);
                } else {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp2 = neon_load_reg(rm, 0);
                tmp4 = tcg_const_i32(rn);
                tmp5 = tcg_const_i32(n);
                gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
                tcg_temp_free_i32(tmp);
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 1);
                } else {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp3 = neon_load_reg(rm, 1);
                gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
                tcg_temp_free_i32(tmp5);
                tcg_temp_free_i32(tmp4);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp3);
                tcg_temp_free_i32(tmp);
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                    return 1;
                }
                if (insn & (1 << 19)) {
                    tmp = neon_load_reg(rm, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                }
                if (insn & (1 << 16)) {
                    gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
                } else if (insn & (1 << 17)) {
                    if ((insn >> 18) & 1)
                        gen_neon_dup_high16(tmp);
                    else
                        gen_neon_dup_low16(tmp);
                }
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp2, tmp);
                    neon_store_reg(rd, pass, tmp2);
                }
                tcg_temp_free_i32(tmp);
            } else {
                return 1;
            }
        }
    }
    return 0;
}
static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            gen_set_pc_im(s, s->pc);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
        }
        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
->type
& ARM_CP_CONST
) {
7184 tmp64
= tcg_const_i64(ri
->resetvalue
);
7185 } else if (ri
->readfn
) {
7187 tmp64
= tcg_temp_new_i64();
7188 tmpptr
= tcg_const_ptr(ri
);
7189 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7190 tcg_temp_free_ptr(tmpptr
);
7192 tmp64
= tcg_temp_new_i64();
7193 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7195 tmp
= tcg_temp_new_i32();
7196 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7197 store_reg(s
, rt
, tmp
);
7198 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7199 tmp
= tcg_temp_new_i32();
7200 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7201 tcg_temp_free_i64(tmp64
);
7202 store_reg(s
, rt2
, tmp
);
7205 if (ri
->type
& ARM_CP_CONST
) {
7206 tmp
= tcg_const_i32(ri
->resetvalue
);
7207 } else if (ri
->readfn
) {
7209 tmp
= tcg_temp_new_i32();
7210 tmpptr
= tcg_const_ptr(ri
);
7211 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7212 tcg_temp_free_ptr(tmpptr
);
7214 tmp
= load_cpu_offset(ri
->fieldoffset
);
7217 /* Destination register of r15 for 32 bit loads sets
7218 * the condition codes from the high 4 bits of the value
7221 tcg_temp_free_i32(tmp
);
7223 store_reg(s
, rt
, tmp
);
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }
    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d\n",
                      isread ? "read" : "write", cpnum, opc1, crm);
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
    }

    return 1;
}
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
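/* The flag convention here matches the rest of this file: cpu_NF holds a
 * word whose bit 31 is the N flag, and cpu_ZF holds a word that is zero
 * exactly when the Z flag is set, so OR-ing the two halves gives a correct
 * 64-bit zero test.
 */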
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
static void disas_arm_insn(CPUARMState *env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->pc += 4;

    /* M variants do not implement ARM mode.  */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        goto illegal_op;
    }
    cond = insn >> 28;
    if (cond == 0xf) {
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
                goto illegal_op;
            }

            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
                goto illegal_op;
            }

            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f000e10) == 0x0e000a00) {
            /* VFP.  */
            if (disas_vfp_insn(env, s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if (((insn & 0x0f30f000) == 0x0510f000) ||
            ((insn & 0x0f30f010) == 0x0710f000)) {
            if ((insn & (1 << 22)) == 0) {
                /* PLDW; v7MP */
                if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
                    goto illegal_op;
                }
            }
            /* Otherwise PLD; v5TE+ */
            ARCH(5TE);
            return;
        }
        if (((insn & 0x0f70f000) == 0x0450f000) ||
            ((insn & 0x0f70f010) == 0x0650f000)) {
            ARCH(7);
            return; /* PLI; V7 */
        }
        if (((insn & 0x0f700000) == 0x04100000) ||
            ((insn & 0x0f700010) == 0x06100000)) {
            if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
                goto illegal_op;
            }
            return; /* v7MP: Unallocated memory hint: must NOP */
        }

        if ((insn & 0x0ffffdff) == 0x01010000) {
            ARCH(6);
            /* setend */
            if (((insn >> 9) & 1) != s->bswap_code) {
                /* Dynamic endianness switching not implemented. */
                qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
            case 1: /* clrex */
                ARCH(6K);
                gen_clrex(s);
                return;
            case 4: /* dsb */
            case 5: /* dmb */
            case 6: /* isb */
                ARCH(7);
                /* We don't emulate caches so these are a no-op.  */
                return;
            default:
                goto illegal_op;
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            /* srs */
            if (IS_USER(s)) {
                goto illegal_op;
            }
            ARCH(6);
            gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
            return;
        } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
            /* rfe */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2.  */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                tcg_temp_free_i32(addr);
            }
            gen_rfe(s, tmp, tmp2);
            return;
7713 } else if ((insn
& 0x0e000000) == 0x0a000000) {
7714 /* branch link and change to thumb (blx <offset>) */
7717 val
= (uint32_t)s
->pc
;
7718 tmp
= tcg_temp_new_i32();
7719 tcg_gen_movi_i32(tmp
, val
);
7720 store_reg(s
, 14, tmp
);
7721 /* Sign-extend the 24-bit offset */
7722 offset
= (((int32_t)insn
) << 8) >> 8;
7723 /* offset * 4 + bit24 * 2 + (thumb bit) */
7724 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
7725 /* pipeline offset */
7727 /* protected by ARCH(5); above, near the start of uncond block */
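        /* Encoding note: for immediate BLX, bit 24 (the H bit) contributes
         * the extra halfword of offset ((insn >> 23) & 2 above), and bit 0
         * of the computed target is set so the branch switches to Thumb.
         */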
    } else if ((insn & 0x0e000f00) == 0x0c000100) {
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            /* iWMMXt register transfer. */
            if (extract32(s->c15_cpar, 1, 1)) {
                if (!disas_iwmmxt_insn(env, s, insn)) {
    } else if ((insn & 0x0fe00000) == 0x0c400000) {
        /* Coprocessor double register transfer. */
    } else if ((insn & 0x0f000010) == 0x0e000010) {
        /* Additional coprocessor register transfer. */
    } else if ((insn & 0x0ff10020) == 0x01000000) {
        /* cps (privileged) */
        if (insn & (1 << 19)) {
            if (insn & (1 << 8))
            if (insn & (1 << 7))
            if (insn & (1 << 6))
            if (insn & (1 << 18))
        if (insn & (1 << 17)) {
            val |= (insn & 0x1f);
        gen_set_psr_im(s, mask, 0, val);
    /* if not always execute, we generate a conditional jump to
       the next instruction */
    s->condlabel = gen_new_label();
    arm_gen_test_cc(cond ^ 1, s->condlabel);
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            store_reg(s, rd, tmp);
            if (((insn >> 12) & 0xf) != 0xf)
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            /* CPSR = immediate */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
            i = ((insn & (1 << 22)) != 0);
            if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        case 0x0: /* move program status register */
            tmp = load_reg(s, rm);
            i = ((op1 & 2) != 0);
            if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
            rd = (insn >> 12) & 0xf;
            tmp = load_cpu_field(spsr);
            tmp = tcg_temp_new_i32();
            gen_helper_cpsr_read(tmp, cpu_env);
            store_reg(s, rd, tmp);
            /* branch/exchange thumb (bx). */
            tmp = load_reg(s, rm);
            } else if (op1 == 3) {
            rd = (insn >> 12) & 0xf;
            tmp = load_reg(s, rm);
            gen_helper_clz(tmp, tmp);
            store_reg(s, rd, tmp);
            /* Trivial implementation equivalent to bx. */
            tmp = load_reg(s, rm);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            uint32_t c = extract32(insn, 8, 4);
            /* Check this CPU supports ARMv8 CRC instructions.
             * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
             * Bits 8, 10 and 11 should be zero.
             */
            if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
            rn = extract32(insn, 16, 4);
            rd = extract32(insn, 12, 4);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            tcg_gen_andi_i32(tmp2, tmp2, 0xff);
            } else if (op1 == 1) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
            tmp3 = tcg_const_i32(1 << op1);
            gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
            gen_helper_crc32(tmp, tmp, tmp2, tmp3);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp3);
            store_reg(s, rd, tmp);
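            /* tmp3 carries the operand size in bytes (1 << op1), letting the
             * crc32/crc32c helpers serve the byte, halfword and word forms
             * of the instruction with a single implementation.
             */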
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            gen_helper_double_saturate(tmp2, cpu_env, tmp2);
            gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
            gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
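            /* For the doubling forms (QDADD/QDSUB), the second operand is
             * first saturatingly doubled by gen_helper_double_saturate, then
             * the saturating add/subtract is applied, matching the ARM QD*
             * definitions.
             */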
            int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
            gen_exception_insn(s, 4, EXCP_BKPT,
                               syn_aa32_bkpt(imm16, false));
            /* Hypervisor call (v7) */
            /* Secure monitor call (v6+) */
        case 0x8: /* signed multiply */
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            /* (32 * 16) >> 16 */
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            tcg_gen_sari_i32(tmp2, tmp2, 16);
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
            tcg_gen_shri_i64(tmp64, tmp64, 16);
            tmp = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(tmp, tmp64);
            tcg_temp_free_i64(tmp64);
            if ((sh & 2) == 0) {
                tmp2 = load_reg(s, rn);
                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
            tcg_temp_free_i32(tmp2);
            tmp64 = tcg_temp_new_i64();
            tcg_gen_ext_i32_i64(tmp64, tmp);
            tcg_temp_free_i32(tmp);
            gen_addq(s, tmp64, rn, rd);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
            tmp2 = load_reg(s, rn);
            gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;
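        /* table_logic_cc[] flags which of the 16 data-processing opcodes are
         * logical ops (AND/EOR/TST/TEQ/ORR/MOV/BIC/MVN); only those set the
         * carry flag from the shifter output instead of an arithmetic result.
         */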
        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
            TCGV_UNUSED_I32(tmp);
        rd = (insn >> 12) & 0xf;
            tcg_gen_and_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return. */
                gen_sub_CC(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                store_reg_bx(env, s, rd, tmp);
            gen_sub_CC(tmp, tmp2, tmp);
            tcg_gen_sub_i32(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            gen_add_CC(tmp, tmp, tmp2);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_adc_CC(tmp, tmp, tmp2);
            gen_add_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_sbc_CC(tmp, tmp, tmp2);
            gen_sub_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_sbc_CC(tmp, tmp2, tmp);
            gen_sub_carry(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            gen_sub_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            gen_add_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return. */
                gen_exception_return(s, tmp2);
                store_reg_bx(env, s, rd, tmp2);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_not_i32(tmp2, tmp2);
            store_reg_bx(env, s, rd, tmp2);
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        /* multiplies, extra load/stores */
        sh = (insn >> 5) & 3;
        rd = (insn >> 16) & 0xf;
        rn = (insn >> 12) & 0xf;
        rs = (insn >> 8) & 0xf;
        op1 = (insn >> 20) & 0xf;
        case 0: case 1: case 2: case 3: case 6:
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            if (insn & (1 << 22)) {
                /* Subtract (mls) */
                tmp2 = load_reg(s, rn);
                tcg_gen_sub_i32(tmp, tmp2, tmp);
                tcg_temp_free_i32(tmp2);
            } else if (insn & (1 << 21)) {
                tmp2 = load_reg(s, rn);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            if (insn & (1 << 20))
            store_reg(s, rd, tmp);
            /* 64 bit mul double accumulate (UMAAL) */
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
            gen_addq_lo(s, tmp64, rn);
            gen_addq_lo(s, tmp64, rd);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
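            /* UMAAL accumulates both 32-bit halves into the product
             * (RdHi:RdLo = Rm * Rs + RdHi + RdLo), which is why two
             * gen_addq_lo calls precede the 64-bit writeback.
             */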
        case 8: case 9: case 10: case 11:
        case 12: case 13: case 14: case 15:
            /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            if (insn & (1 << 22)) {
                tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
                tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
            if (insn & (1 << 21)) { /* mult accumulate */
                TCGv_i32 al = load_reg(s, rn);
                TCGv_i32 ah = load_reg(s, rd);
                tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
                tcg_temp_free_i32(al);
                tcg_temp_free_i32(ah);
            if (insn & (1 << 20)) {
                gen_logicq_cc(tmp, tmp2);
            store_reg(s, rn, tmp);
            store_reg(s, rd, tmp2);
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (insn & (1 << 23)) {
                /* load/store exclusive */
                int op2 = (insn >> 8) & 3;
                op1 = (insn >> 21) & 0x3;
                case 0: /* lda/stl */
                case 1: /* reserved */
                case 2: /* ldaex/stlex */
                case 3: /* ldrex/strex */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
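                /* addr must be a local temp here: unlike a plain TCG temp,
                 * a local keeps its value across the branches emitted by the
                 * exclusive load/store code below.
                 */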
                /* Since the emulation does not have barriers,
                   the acquire/release semantics need no special
                   treatment */
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                    gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                    tmp = load_reg(s, rm);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    gen_aa32_st8(tmp, addr, get_mem_index(s));
                    gen_aa32_st16(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                } else if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rd, 15, addr, 2);
                    case 1: /* ldrexd */
                        gen_load_exclusive(s, rd, rd + 1, addr, 3);
                    case 2: /* ldrexb */
                        gen_load_exclusive(s, rd, 15, addr, 0);
                    case 3: /* ldrexh */
                        gen_load_exclusive(s, rd, 15, addr, 1);
                    gen_store_exclusive(s, rd, rm, 15, addr, 2);
                    case 1: /* strexd */
                        gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                    case 2: /* strexb */
                        gen_store_exclusive(s, rd, rm, 15, addr, 0);
                    case 3: /* strexh */
                        gen_store_exclusive(s, rd, rm, 15, addr, 1);
                tcg_temp_free_i32(addr);
                /* SWP instruction */
                /* ??? This is not really atomic. However we know
                   we never have multiple CPUs running in parallel,
                   so it is good enough. */
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                tmp2 = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
                    gen_aa32_st8(tmp, addr, get_mem_index(s));
                    gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                store_reg(s, rd, tmp2);
            /* Misc load/store */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            addr = load_reg(s, rn);
            if (insn & (1 << 24))
                gen_add_datah_offset(s, insn, 0, addr);
            if (insn & (1 << 20)) {
                tmp = tcg_temp_new_i32();
                gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                gen_aa32_ld8s(tmp, addr, get_mem_index(s));
                gen_aa32_ld16s(tmp, addr, get_mem_index(s));
            } else if (sh & 2) {
                tmp = load_reg(s, rd);
                gen_aa32_st32(tmp, addr, get_mem_index(s));
                tcg_temp_free_i32(tmp);
                tcg_gen_addi_i32(addr, addr, 4);
                tmp = load_reg(s, rd + 1);
                gen_aa32_st32(tmp, addr, get_mem_index(s));
                tcg_temp_free_i32(tmp);
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                store_reg(s, rd, tmp);
                tcg_gen_addi_i32(addr, addr, 4);
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                address_offset = -4;
                tmp = load_reg(s, rd);
                gen_aa32_st16(tmp, addr, get_mem_index(s));
                tcg_temp_free_i32(tmp);
            /* Perform base writeback before the loaded value to
               ensure correct behavior with overlapping index registers.
               ldrd with base writeback is undefined if the
               destination and index registers overlap. */
            if (!(insn & (1 << 24))) {
                gen_add_datah_offset(s, insn, address_offset, addr);
                store_reg(s, rn, addr);
            } else if (insn & (1 << 21)) {
                tcg_gen_addi_i32(addr, addr, address_offset);
                store_reg(s, rn, addr);
                tcg_temp_free_i32(addr);
            /* Complete the load. */
            store_reg(s, rd, tmp);
        if (insn & (1 << 4)) {
            /* Armv6 Media instructions. */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            rs = (insn >> 8) & 0xf;
            switch ((insn >> 23) & 3) {
            case 0: /* Parallel add/subtract. */
                op1 = (insn >> 20) & 7;
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                sh = (insn >> 5) & 7;
                if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                if ((insn & 0x00700020) == 0) {
                    /* Halfword pack. */
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp2, tmp2, shift);
                        tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_shli_i32(tmp2, tmp2, shift);
                        tcg_gen_ext16u_i32(tmp, tmp);
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00200020) == 0x00200000) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp, tmp, shift);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00300fe0) == 0x00200f20) {
                    tmp = load_reg(s, rm);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x000003e0) == 0x00000060) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 10) & 3;
                    /* ??? In many cases it's not necessary to do a
                       rotate, a shift is sufficient. */
                    tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                    op1 = (insn >> 20) & 7;
                    case 0: gen_sxtb16(tmp); break;
                    case 2: gen_sxtb(tmp); break;
                    case 3: gen_sxth(tmp); break;
                    case 4: gen_uxtb16(tmp); break;
                    case 6: gen_uxtb(tmp); break;
                    case 7: gen_uxth(tmp); break;
                    default: goto illegal_op;
                    tmp2 = load_reg(s, rn);
                    if ((op1 & 3) == 0) {
                        gen_add16(tmp, tmp2);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                    tmp = load_reg(s, rm);
                    if (insn & (1 << 22)) {
                        if (insn & (1 << 7)) {
                        gen_helper_rbit(tmp, tmp);
                        if (insn & (1 << 7))
                        tcg_gen_bswap32_i32(tmp, tmp);
                    store_reg(s, rd, tmp);
            case 2: /* Multiplies (Type 3). */
                switch ((insn >> 20) & 0x7) {
                    if (((insn >> 6) ^ (insn >> 7)) & 1) {
                        /* op2 not 00x or 11x : UNDEF */
                    /* Signed multiply most significant [accumulate].
                       (SMMUL, SMMLA, SMMLS) */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 6)) {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    if (insn & (1 << 5)) {
                        tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                    tcg_gen_shri_i64(tmp64, tmp64, 32);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, tmp64);
                    tcg_temp_free_i64(tmp64);
                    store_reg(s, rn, tmp);
                    /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
                    if (insn & (1 << 7)) {
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 5))
                        gen_swap_half(tmp2);
                    gen_smul_dual(tmp, tmp2);
                    if (insn & (1 << 22)) {
                        /* smlald, smlsld */
                        tmp64 = tcg_temp_new_i64();
                        tmp64_2 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_gen_ext_i32_i64(tmp64_2, tmp2);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 6)) {
                            tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
                            tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
                        tcg_temp_free_i64(tmp64_2);
                        gen_addq(s, tmp64, rd, rn);
                        gen_storeq_reg(s, rd, rn, tmp64);
                        tcg_temp_free_i64(tmp64);
                        /* smuad, smusd, smlad, smlsd */
                        if (insn & (1 << 6)) {
                            /* This subtraction cannot overflow. */
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                            /* This addition cannot overflow 32 bits;
                             * however it may overflow considered as a
                             * signed operation, in which case we must set
                             * the Q flag.
                             */
                            gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tmp2 = load_reg(s, rd);
                        gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                    if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
                    if (((insn >> 5) & 7) || (rd != 15)) {
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 21)) {
                        gen_helper_udiv(tmp, tmp, tmp2);
                        gen_helper_sdiv(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rn, tmp);
                op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                case 0: /* Unsigned sum of absolute differences. */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    gen_helper_usad8(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rd);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rn, tmp);
                case 0x20: case 0x24: case 0x28: case 0x2c:
                    /* Bitfield insert/clear. */
                    shift = (insn >> 7) & 0x1f;
                    i = (insn >> 16) & 0x1f;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rd);
                    tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    i = ((insn >> 16) & 0x1f) + 1;
                    gen_ubfx(tmp, shift, (1u << i) - 1);
                    gen_sbfx(tmp, shift, i);
                    store_reg(s, rd, tmp);
        /* Check for undefined extension instructions
         * per the ARM Bible IE:
         * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
         */
        sh = (0xf << 20) | (0xf << 4);
        if (op1 == 0x7 && ((insn & sh) == sh))
        /* load/store byte/word */
        rn = (insn >> 16) & 0xf;
        rd = (insn >> 12) & 0xf;
        tmp2 = load_reg(s, rn);
        if ((insn & 0x01200000) == 0x00200000) {
        i = get_mem_index(s);
        if (insn & (1 << 24))
            gen_add_data_offset(s, insn, tmp2);
        if (insn & (1 << 20)) {
            tmp = tcg_temp_new_i32();
            if (insn & (1 << 22)) {
                gen_aa32_ld8u(tmp, tmp2, i);
                gen_aa32_ld32u(tmp, tmp2, i);
            tmp = load_reg(s, rd);
            if (insn & (1 << 22)) {
                gen_aa32_st8(tmp, tmp2, i);
                gen_aa32_st32(tmp, tmp2, i);
            tcg_temp_free_i32(tmp);
        if (!(insn & (1 << 24))) {
            gen_add_data_offset(s, insn, tmp2);
            store_reg(s, rn, tmp2);
        } else if (insn & (1 << 21)) {
            store_reg(s, rn, tmp2);
            tcg_temp_free_i32(tmp2);
        if (insn & (1 << 20)) {
            /* Complete the load. */
            store_reg_from_load(env, s, rd, tmp);
            int j, n, user, loaded_base;
            TCGv_i32 loaded_var;
            /* load/store multiple words */
            /* XXX: store correct base if write back */
            if (insn & (1 << 22)) {
                goto illegal_op; /* only usable in supervisor mode */
            if ((insn & (1 << 15)) == 0)
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            /* compute total size */
            TCGV_UNUSED_I32(loaded_var);
            if (insn & (1 << i))
            /* XXX: test invalid n == 0 case ? */
            if (insn & (1 << 23)) {
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, 4);
                    /* post increment */
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -(n * 4));
                    /* post decrement */
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            if (insn & (1 << i)) {
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    tmp2 = tcg_const_i32(i);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp);
                } else if (i == rn) {
                    store_reg_from_load(env, s, i, tmp);
                    /* special case: r15 = PC + 8 */
                    val = (long)s->pc + 4;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
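                    /* s->pc has already been advanced past this insn, so
                     * pc + 4 yields the architectural "address + 8" value
                     * that a stored r15 must hold.
                     */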
                    tmp = tcg_temp_new_i32();
                    tmp2 = tcg_const_i32(i);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                /* no need to add after the last transfer */
                tcg_gen_addi_i32(addr, addr, 4);
            if (insn & (1 << 21)) {
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* post increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        /* post decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                store_reg(s, rn, addr);
                tcg_temp_free_i32(addr);
                store_reg(s, rn, loaded_var);
            if ((insn & (1 << 22)) && !user) {
                /* Restore CPSR from SPSR. */
                tmp = load_cpu_field(spsr);
                gen_set_cpsr(tmp, CPSR_ERET_MASK);
                tcg_temp_free_i32(tmp);
                s->is_jmp = DISAS_UPDATE;
            /* branch (and link) */
            val = (int32_t)s->pc;
            if (insn & (1 << 24)) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
                store_reg(s, 14, tmp);
            offset = sextract32(insn << 2, 0, 26);
            if (((insn >> 8) & 0xe) == 10) {
                if (disas_vfp_insn(env, s, insn)) {
            } else if (disas_coproc_insn(env, s, insn)) {
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 24);
            s->is_jmp = DISAS_SWI;
            gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());

/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)

/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
        tcg_gen_and_i32(t0, t0, t1);
        tcg_gen_andc_i32(t0, t0, t1);
        tcg_gen_or_i32(t0, t0, t1);
        tcg_gen_orc_i32(t0, t0, t1);
        tcg_gen_xor_i32(t0, t0, t1);
        gen_add_CC(t0, t0, t1);
        tcg_gen_add_i32(t0, t0, t1);
        gen_adc_CC(t0, t0, t1);
        gen_sbc_CC(t0, t0, t1);
        gen_sub_carry(t0, t0, t1);
        gen_sub_CC(t0, t0, t1);
        tcg_gen_sub_i32(t0, t0, t1);
        gen_sub_CC(t0, t1, t0);
        tcg_gen_sub_i32(t0, t1, t0);
    default: /* 5, 6, 7, 9, 12, 15. */
        gen_set_CF_bit31(t1);
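/* For reference (from the Thumb-2 data-processing encoding, not spelled out
 * in the surviving case labels above): op 0 is AND, 1 BIC, 2 ORR, 3 ORN,
 * 4 EOR, 8 ADD, 10 ADC, 11 SBC, 13 SUB and 14 RSB; 5, 6, 7, 9, 12 and 15
 * hit the default case.
 */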
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
   is not legal. */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
          || arm_dc_feature(s, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior. */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx. */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if (insn & (1 << 11)) {
            /* Second half of bl. */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary. Implement it as two
               16-bit instructions in case the second half causes an
               exception. */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
        /* Fall through to 32-bit decode. */
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    insn |= (uint32_t)insn_hw1 << 16;
    if ((insn & 0xf800e800) != 0xf000e800) {
    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions. Should never happen. */
        if (insn & (1 << 22)) {
            /* Other load/store, table branch. */
            if (insn & 0x01200000) {
                /* Load/store doubleword. */
                addr = tcg_temp_new_i32();
                tcg_gen_movi_i32(addr, s->pc & ~3);
                addr = load_reg(s, rn);
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                    tmp = load_reg(s, rs);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                if (insn & (1 << 21)) {
                    /* Base writeback. */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                    tcg_temp_free_i32(addr);
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word. */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                addr = tcg_temp_new_i32();
                tcg_gen_movi_i32(addr, s->pc);
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
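                /* Table Branch: the table entry is a halfword count, so it
                 * is doubled (shli by 1) and added to the PC to form the
                 * branch target stored to r15 above.
                 */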
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                /* Load/store exclusive byte/halfword/doubleword */
                /* Load-acquire/store-release */
                /* Load-acquire/store-release exclusive */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                    gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    store_reg(s, rs, tmp);
                    tmp = load_reg(s, rs);
                    gen_aa32_st8(tmp, addr, get_mem_index(s));
                    gen_aa32_st16(tmp, addr, get_mem_index(s));
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                } else if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                tcg_temp_free_i32(addr);
        /* Load/store multiple, RFE, SRS. */
        if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
            /* RFE, SRS: not available in user mode or on M profile */
            if (IS_USER(s) || IS_M(env)) {
            if (insn & (1 << 20)) {
                addr = load_reg(s, rn);
                if ((insn & (1 << 24)) == 0)
                    tcg_gen_addi_i32(addr, addr, -8);
                /* Load PC into tmp and CPSR into tmp2. */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                tcg_gen_addi_i32(addr, addr, 4);
                tmp2 = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
                if (insn & (1 << 21)) {
                    /* Base writeback. */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, 4);
                        tcg_gen_addi_i32(addr, addr, -4);
                    store_reg(s, rn, addr);
                    tcg_temp_free_i32(addr);
                gen_rfe(s, tmp, tmp2);
                gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                        insn & (1 << 21));
            int i, loaded_base = 0;
            TCGv_i32 loaded_var;
            /* Load/store multiple. */
            addr = load_reg(s, rn);
            for (i = 0; i < 16; i++) {
                if (insn & (1 << i))
            if (insn & (1 << 24)) {
                tcg_gen_addi_i32(addr, addr, -offset);
            TCGV_UNUSED_I32(loaded_var);
            for (i = 0; i < 16; i++) {
                if ((insn & (1 << i)) == 0)
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                } else if (i == rn) {
                    store_reg(s, i, tmp);
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                tcg_gen_addi_i32(addr, addr, 4);
            store_reg(s, rn, loaded_var);
            if (insn & (1 << 21)) {
                /* Base register writeback. */
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                /* Fault if writeback register is in register list. */
                if (insn & (1 << rn))
                store_reg(s, rn, addr);
                tcg_temp_free_i32(addr);
        op = (insn >> 21) & 0xf;
            /* Halfword pack. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
                tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            /* Data processing register constant shift. */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp);
    case 13: /* Misc data processing. */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
        case 0: /* Register controlled shift. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            store_reg_bx(env, s, rd, tmp);
        case 1: /* Sign/zero extend. */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient. */
            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            case 0: gen_sxth(tmp); break;
            case 1: gen_uxth(tmp); break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp); break;
            case 5: gen_uxtb(tmp); break;
            default: goto illegal_op;
            tmp2 = load_reg(s, rn);
            if ((op >> 1) == 1) {
                gen_add16(tmp, tmp2);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        case 2: /* SIMD add/subtract. */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        case 3: /* Other data processing. */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            /* Saturating add/subtract. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_helper_double_saturate(tmp, cpu_env, tmp);
            gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
            gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            tmp = load_reg(s, rn);
            case 0x0a: /* rbit */
                gen_helper_rbit(tmp, tmp);
            case 0x08: /* rev */
                tcg_gen_bswap32_i32(tmp, tmp);
            case 0x09: /* rev16 */
            case 0x0b: /* revsh */
            case 0x10: /* sel */
                tmp2 = load_reg(s, rm);
                tmp3 = tcg_temp_new_i32();
                tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_i32(tmp2);
            case 0x18: /* clz */
                gen_helper_clz(tmp, tmp);
                uint32_t sz = op & 0x3;
                uint32_t c = op & 0x8;
                if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
                tmp2 = load_reg(s, rm);
                tcg_gen_andi_i32(tmp2, tmp2, 0xff);
                } else if (sz == 1) {
                    tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
                tmp3 = tcg_const_i32(1 << sz);
                gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
                gen_helper_crc32(tmp, tmp, tmp2, tmp3);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp3);
            store_reg(s, rd, tmp);
        case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp2 = load_reg(s, rs);
                tcg_gen_sub_i32(tmp, tmp2, tmp);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                tmp2 = load_reg(s, rs);
                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            case 2: /* Dual multiply add. */
            case 4: /* Dual multiply subtract. */
                gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp2 = load_reg(s, rs);
                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            case 3: /* 32 * 16 -> 32msb */
                tcg_gen_sari_i32(tmp2, tmp2, 16);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                tmp2 = load_reg(s, rs);
                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tmp = load_reg(s, rs);
                if (insn & (1 << 20)) {
                    tmp64 = gen_addq_msw(tmp64, tmp);
                    tmp64 = gen_subq_msw(tmp64, tmp);
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
            case 7: /* Unsigned sum of absolute differences. */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp2 = load_reg(s, rs);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        case 6: case 7: /* 64-bit multiply, Divide. */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
                gen_helper_udiv(tmp, tmp, tmp2);
                gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long. */
                gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
                /* Unsigned 64-bit multiply */
                tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                /* Signed 64-bit multiply */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                gen_addq_lo(s, tmp64, rs);
                gen_addq_lo(s, tmp64, rd);
            } else if (op & 0x40) {
                /* 64-bit accumulate. */
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
    case 6: case 7: case 14: case 15:
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding. */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(env, s, insn)) {
            if (insn & (1 << 28))
            if (disas_coproc_insn(env, s, insn))
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control. */
            if (insn & 0x5000) {
                /* Unconditional branch. */
                /* signextend(hw1[10:0]) -> offset[:12]. */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1]. */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above. */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
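                /* These XORs compute I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S)
                 * from the T32 branch encoding: hw2 bits 13 and 11 are J1/J2,
                 * and the sign bit already seeded offset bits 24:22 above.
                 */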
                if (insn & (1 << 14)) {
                    /* Branch and link. */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                if (insn & (1 << 12)) {
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
            } else if (((insn >> 23) & 7) == 7) {
                if (insn & (1 << 13))
                if (insn & (1 << 26)) {
                    if (!(insn & (1 << 20))) {
                        /* Hypervisor call (v7) */
                        int imm16 = extract32(insn, 16, 4) << 12
                                    | extract32(insn, 0, 12);
                        /* Secure monitor call (v6+) */
                op = (insn >> 20) & 7;
                case 0: /* msr cpsr. */
                    tmp = load_reg(s, rn);
                    addr = tcg_const_i32(insn & 0xff);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                    tcg_temp_free_i32(tmp);
                case 1: /* msr spsr. */
                    tmp = load_reg(s, rn);
                    msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                case 2: /* cps, nop-hint. */
                    if (((insn >> 8) & 7) == 0) {
                        gen_nop_hint(s, insn & 0xff);
                    /* Implemented as NOP in user mode. */
                    if (insn & (1 << 10)) {
                        if (insn & (1 << 7))
                        if (insn & (1 << 6))
                        if (insn & (1 << 5))
                        if (insn & (1 << 9))
                            imm = CPSR_A | CPSR_I | CPSR_F;
                    if (insn & (1 << 8)) {
                        imm |= (insn & 0x1f);
                    gen_set_psr_im(s, offset, 0, imm);
                case 3: /* Special control operations. */
                    op = (insn >> 4) & 0xf;
                    /* These execute as NOPs. */
                    /* Trivial implementation equivalent to bx. */
                    tmp = load_reg(s, rn);
                case 5: /* Exception return. */
                    if (rn != 14 || rd != 15) {
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                case 6: /* mrs cpsr. */
                    tmp = tcg_temp_new_i32();
                    addr = tcg_const_i32(insn & 0xff);
                    gen_helper_v7m_mrs(tmp, cpu_env, addr);
                    tcg_temp_free_i32(addr);
                    gen_helper_cpsr_read(tmp, cpu_env);
                    store_reg(s, rd, tmp);
                case 7: /* mrs spsr. */
                    /* Not accessible in user mode. */
                    if (IS_USER(s) || IS_M(env))
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                /* Conditional branch. */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction. */
                s->condlabel = gen_new_label();
                arm_gen_test_cc(op ^ 1, s->condlabel);
                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16]. */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26]. */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13]. */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11]. */
                offset |= (insn & (1 << 11)) << 8;
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            /* Data processing immediate. */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                    /* Bitfield/Saturate. */
                    op = (insn >> 21) & 7;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rn);
                    case 2: /* Signed bitfield extract. */
                        if (shift + imm > 32)
                        gen_sbfx(tmp, shift, imm);
                    case 6: /* Unsigned bitfield extract. */
                        if (shift + imm > 32)
                        gen_ubfx(tmp, shift, (1u << imm) - 1);
                    case 3: /* Bitfield insert/clear. */
                        imm = imm + 1 - shift;
                        tmp2 = load_reg(s, rd);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                        tcg_temp_free_i32(tmp2);
                    default: /* Saturate. */
                        tcg_gen_sari_i32(tmp, tmp, shift);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                        tmp2 = tcg_const_i32(imm);
                        if ((op & 1) && shift == 0)
                            gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        if ((op & 1) && shift == 0)
                            gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate. */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        /* Add/sub 12-bit immediate. */
                        offset = s->pc & ~(uint32_t)3;
                        if (insn & (1 << 23))
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, offset);
                        tmp = load_reg(s, rn);
                        if (insn & (1 << 23))
                            tcg_gen_subi_i32(tmp, tmp, imm);
                            tcg_gen_addi_i32(tmp, tmp, imm);
                    store_reg(s, rd, tmp);
                int shifter_out = 0;
                /* modified 12-bit immediate. */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                /* Nothing to do. */
                case 1: /* 00XY00XY */
                case 2: /* XY00XY00 */
                case 3: /* XYXYXYXY */
                default: /* Rotated constant. */
                    shift = (shift << 1) | (imm >> 7);
                    imm = imm << (32 - shift);
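                /* The rotation count is 5 bits, i:imm3 assembled above plus
                 * the top bit of imm8 appended here; the constant is then
                 * rotated into position, per the T32 modified-immediate form.
                 */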
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
                tmp = load_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp);
    case 12: /* Load/store single data item. */
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (!(insn & (1 << 20))) {
            /* Byte or halfword load space with dest == r15 : memory hints.
             * Catch them early so we don't emit pointless addressing code.
             * This space is a mix of:
             *  PLD/PLDW/PLI, which we implement as NOPs (note that unlike
             *  the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
             *  unallocated hints, which must be treated as NOPs
             *  UNPREDICTABLE space, which we NOP or UNDEF depending on
             *  which is easiest for the decoding logic
             *  Some space which must UNDEF
             */
            int op1 = (insn >> 23) & 3;
            int op2 = (insn >> 6) & 0x3f;
            /* UNPREDICTABLE, unallocated hint or
             * PLD/PLDW/PLI (literal)
             */
            return 0; /* PLD/PLDW/PLI or unallocated hint */
            if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                return 0; /* PLD/PLDW/PLI or unallocated hint */
            /* UNDEF space, or an UNPREDICTABLE */
        memidx = get_mem_index(s);
        addr = tcg_temp_new_i32();
        /* s->pc has already been incremented by 4. */
        imm = s->pc & 0xfffffffc;
        if (insn & (1 << 23))
            imm += insn & 0xfff;
            imm -= insn & 0xfff;
        tcg_gen_movi_i32(addr, imm);
        addr = load_reg(s, rn);
        if (insn & (1 << 23)) {
            /* Positive offset. */
            imm = insn & 0xfff;
            tcg_gen_addi_i32(addr, addr, imm);
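        /* With the U bit clear, bits 11:8 of the insn select the remaining
         * T32 addressing forms handled below: shifted register, negative
         * offset, unprivileged access, and the post/pre-indexed variants.
         */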
        switch ((insn >> 8) & 0xf) {
        case 0x0: /* Shifted Register. */
            shift = (insn >> 4) & 0xf;
            tcg_temp_free_i32(addr);
            tmp = load_reg(s, rm);
            tcg_gen_shli_i32(tmp, tmp, shift);
            tcg_gen_add_i32(addr, addr, tmp);
            tcg_temp_free_i32(tmp);
        case 0xc: /* Negative offset. */
            tcg_gen_addi_i32(addr, addr, -imm);
        case 0xe: /* User privilege. */
            tcg_gen_addi_i32(addr, addr, imm);
            memidx = MMU_USER_IDX;
        case 0x9: /* Post-decrement. */
            /* Fall through. */
        case 0xb: /* Post-increment. */
        case 0xd: /* Pre-decrement. */
            /* Fall through. */
        case 0xf: /* Pre-increment. */
            tcg_gen_addi_i32(addr, addr, imm);
            tcg_temp_free_i32(addr);
        if (insn & (1 << 20)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, memidx);
            gen_aa32_ld8s(tmp, addr, memidx);
            gen_aa32_ld16u(tmp, addr, memidx);
            gen_aa32_ld16s(tmp, addr, memidx);
            gen_aa32_ld32u(tmp, addr, memidx);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            store_reg(s, rs, tmp);
            tmp = load_reg(s, rs);
            gen_aa32_st8(tmp, addr, memidx);
            gen_aa32_st16(tmp, addr, memidx);
            gen_aa32_st32(tmp, addr, memidx);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
        tcg_gen_addi_i32(addr, addr, imm);
        store_reg(s, rn, addr);
        tcg_temp_free_i32(addr);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
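            /* Inside an IT block each insn is predicated: the test above
             * branches to condlabel, skipping the generated code whenever
             * the condition fails.
             */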
    insn = arm_lduw_code(env, s->pc, s->bswap_code);

    switch (insn >> 12) {
        op = (insn >> 11) & 3;
        rn = (insn >> 3) & 7;
        tmp = load_reg(s, rn);
        if (insn & (1 << 10)) {
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            rm = (insn >> 6) & 7;
            tmp2 = load_reg(s, rm);
        if (insn & (1 << 9)) {
            if (s->condexec_mask)
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                gen_sub_CC(tmp, tmp, tmp2);
            if (s->condexec_mask)
                tcg_gen_add_i32(tmp, tmp, tmp2);
                gen_add_CC(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        store_reg(s, rd, tmp);
        /* shift immediate */
        rm = (insn >> 3) & 7;
        shift = (insn >> 6) & 0x1f;
        tmp = load_reg(s, rm);
        gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
        if (!s->condexec_mask)
        store_reg(s, rd, tmp);
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            gen_sub_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
            if (s->condexec_mask)
                tcg_gen_add_i32(tmp, tmp, tmp2);
                gen_add_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            if (s->condexec_mask)
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                gen_sub_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored. */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                /* already thumb, no need to check */
        /* data processing register */
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
            TCGV_UNUSED_I32(tmp);
        tmp2 = load_reg(s, rm);
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
                gen_adc_CC(tmp, tmp, tmp2);
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
                gen_sbc_CC(tmp, tmp, tmp2);
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
                gen_sub_CC(tmp, tmp, tmp2);
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
        store_reg(s, rm, tmp2);
        tcg_temp_free_i32(tmp);
        store_reg(s, rd, tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(tmp2);
        /* load/store register offset. */
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);
        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
            tmp = tcg_temp_new_i32();
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            gen_aa32_st8(tmp, addr, get_mem_index(s));
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, get_mem_index(s));
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, get_mem_index(s));
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
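        /* The imm5 field sits in bits [10:6] and is scaled by the access
         * size, so shifting right by 4 and masking with 0x7c yields
         * imm5 * 4 in one step; the halfword and byte forms use the same
         * trick with scale factors 2 and 1.
         */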
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend. */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
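            /* The stack is full-descending: for a push (bit 11 clear)
             * the total transfer size was summed up front so SP could be
             * dropped once here, after which the registers are stored in
             * ascending address order.
             */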
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
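            /* The branch offset is i:imm5:'0' - bits [7:3] supply imm5
             * and bit 9 supplies i - so cbz/cbnz can only skip forward,
             * by at most 126 bytes.
             */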
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state. */
            break;
        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
            break;
        }
        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;
    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
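        /* When the base register is also in the register list, the value
         * loaded into it is parked in loaded_var and committed only after
         * the loop, so the base keeps feeding the address arithmetic.
         */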
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
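        /* The shift pair sign-extends the 8-bit immediate, which is then
         * doubled because Thumb branch offsets count halfwords: the reach
         * is -256..+254 bytes relative to the PC-read value (the address
         * of this insn plus 4).
         */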
        val += offset << 1;
        gen_jmp(s, val);
        break;
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
    return;
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(ARMCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_internal_a64(cpu, tb, search_pc);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->current_el = arm_current_el(env);
    dc->features = env->features;
    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */
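    /* For reference, gen_set_condexec() (defined earlier in this file)
     * roughly does: if we are inside an IT block (condexec_mask != 0),
     * emit a store of (condexec_cond << 4) | (condexec_mask >> 1) into
     * env->condexec_bits; otherwise emit nothing, so the common case is
     * free.
     */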
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }
        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 0);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
            goto done_generating;
        }
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
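        /* Worked example of the IT-state advance above: "ITTE EQ"
         * (0xbf06) sets condexec_cond = 0 and condexec_mask = 0x06.
         * The advance runs once for the IT insn itself and then once per
         * conditional insn: bit 4 of the mask is 0, 0, 1 for masks 0x06,
         * 0x0c, 0x18, so the three insns execute as EQ, EQ, NE; the
         * final shift empties the mask and resets the condition.
         */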
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            } else if (dc->is_jmp == DISAS_HVC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
            } else if (dc->is_jmp == DISAS_SMC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SMC, syn_aa32_smc());
            } else if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
        } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
        } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc());
        } else if (dc->ss_active) {
            gen_step_complete_exception(dc);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception_internal(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc());
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
}
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
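    /* The trailing %s%d prints e.g. "usr32": PSR bit 4 distinguishes the
     * 32-bit modes (M[4:0] = 1xxxx) from the 26-bit modes of early ARM
     * cores.
     */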
    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
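/* Called after an exception or fault has interrupted a TB part-way
 * through: pc_pos indexes the per-insn arrays that were filled in when
 * the TB was retranslated with search_pc set, mapping the interrupted
 * position back onto guest PC and IT state.
 */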
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    if (is_a64(env)) {
        env->pc = tcg_ctx.gen_opc_pc[pc_pos];
        env->condexec_bits = 0;
    } else {
        env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
        env->condexec_bits = gen_opc_condexec_bits[pc_pos];
    }
}