/*
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_feature(env, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
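/*
 * Example: for var = 0xAABBCCDD the sequence above computes
 * ((var >> 8) & 0x00ff00ff) | ((var << 8) & 0xff00ff00) = 0xBBAADDCC,
 * i.e. the two bytes within each 16-bit halfword are swapped.
 */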
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
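/*
 * The xor/sub pair is the branch-free sign extension: with
 * signbit = 1 << (width - 1), ((x & ((1 << width) - 1)) ^ signbit) - signbit
 * equals the signed value of the low 'width' bits.  E.g. for width = 8 and
 * x = 0x80: (0x80 ^ 0x80) - 0x80 = -128.
 */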
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
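/*
 * Overflow for addition: V is set when both operands have the same sign
 * and the result's sign differs, hence VF = (NF ^ t0) & ~(t0 ^ t1), where
 * NF holds the result.  E.g. 0x7fffffff + 1 = 0x80000000 sets V.
 */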
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
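/*
 * On hosts with add2 the carry chain is split in two: NF,CF = t0 + CF,
 * then NF,CF += t1, accumulating the carry-outs.  The result is still 0
 * or 1 because a + b + cin with cin in {0,1} can carry out of 32 bits at
 * most once.
 */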
/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
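/*
 * For subtraction ARM defines C as NOT borrow, hence the unsigned
 * t0 >= t1 setcond.  V is set when the operand signs differ and the
 * result's sign differs from t0: VF = (NF ^ t0) & (t0 ^ t1).
 */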
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
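/*
 * ARM register-specified shifts use the bottom byte of the shift source,
 * and amounts of 32..255 must yield zero for LSL/LSR, while TCG shift ops
 * are only defined for counts 0..31.  The movcond substitutes a zero
 * operand when (t1 & 0xff) > 31, so e.g. r0 LSL r1 with r1 = 40 gives 0.
 */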
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
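/*
 * In the encoding, an immediate rotate amount of 0 means RRX: a 33-bit
 * rotate through the carry, i.e. result = (CF << 31) | (var >> 1), with
 * the old bit 0 becoming the new carry when flags are requested.
 */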
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
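/*
 * Conditions 8 (hi) and 12 (gt) are conjunctions, so they branch to a
 * local 'inv' label to fall through when the first term fails; the
 * disjunctions (ls, le) simply emit two branches to the taken label.
 */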
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
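/*
 * Interworking branches encode the target state in bit 0 of the address:
 * bit 0 set selects Thumb.  The PC itself is always stored with bit 0
 * clear, which is why both helpers mask the destination with ~1.
 */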
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
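/*
 * This decodes ARM addressing mode 2: bit 25 selects register offset vs.
 * a 12-bit immediate, bit 23 selects add vs. subtract, and bits 11:5
 * encode the immediate shift applied to the index register.  E.g. for
 * "ldr r0, [r1, r2, lsl #2]" the offset is r2 shifted left by 2.
 */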
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name)                                                 \
static inline void gen_vfp_##name(int dp, int shift, int neon)            \
{                                                                         \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                            \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                          \
    if (dp) {                                                             \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else {                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    }                                                                     \
    tcg_temp_free_i32(tmp_shift);                                         \
    tcg_temp_free_ptr(statusptr);                                         \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
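/*
 * VFP single-precision registers alias the double-precision bank:
 * s(2n) is the low word of d(n) and s(2n+1) the high word, which is
 * what the l.lower/l.upper offsets select above.
 */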
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
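/*
 * iwMMXt loads/stores use the standard ARM P/U/W bits: bit 24 selects
 * pre- vs. post-indexing, bit 23 the offset direction and bit 21 base
 * writeback; the offset itself is an 8-bit immediate, scaled by 4 when
 * bit 8 of the instruction is set.  Unsupported combinations make this
 * helper return 1.
 */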
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
2392 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2393 (ie. an undefined instruction). */
2394 static int disas_dsp_insn(CPUARMState
*env
, DisasContext
*s
, uint32_t insn
)
2396 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2399 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2400 /* Multiply with Internal Accumulate Format */
2401 rd0
= (insn
>> 12) & 0xf;
2403 acc
= (insn
>> 5) & 7;
2408 tmp
= load_reg(s
, rd0
);
2409 tmp2
= load_reg(s
, rd1
);
2410 switch ((insn
>> 16) & 0xf) {
2412 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2414 case 0x8: /* MIAPH */
2415 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2417 case 0xc: /* MIABB */
2418 case 0xd: /* MIABT */
2419 case 0xe: /* MIATB */
2420 case 0xf: /* MIATT */
2421 if (insn
& (1 << 16))
2422 tcg_gen_shri_i32(tmp
, tmp
, 16);
2423 if (insn
& (1 << 17))
2424 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2430 tcg_temp_free_i32(tmp2
);
2431 tcg_temp_free_i32(tmp
);
2433 gen_op_iwmmxt_movq_wRn_M0(acc
);
2437 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2438 /* Internal Accumulator Access Format */
2439 rdhi
= (insn
>> 16) & 0xf;
2440 rdlo
= (insn
>> 12) & 0xf;
2446 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2447 iwmmxt_load_reg(cpu_V0
, acc
);
2448 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2449 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2450 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2451 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2453 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2454 iwmmxt_store_reg(cpu_V0
, acc
);
2462 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2463 #define VFP_SREG(insn, bigbit, smallbit) \
2464 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2465 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2466 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2467 reg = (((insn) >> (bigbit)) & 0x0f) \
2468 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2470 if (insn & (1 << (smallbit))) \
2472 reg = ((insn) >> (bigbit)) & 0x0f; \
2475 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2476 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2477 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2478 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2479 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2480 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2482 /* Move between integer and VFP cores. */
2483 static TCGv_i32
gen_vfp_mrs(void)
2485 TCGv_i32 tmp
= tcg_temp_new_i32();
2486 tcg_gen_mov_i32(tmp
, cpu_F0s
);
2490 static void gen_vfp_msr(TCGv_i32 tmp
)
2492 tcg_gen_mov_i32(cpu_F0s
, tmp
);
2493 tcg_temp_free_i32(tmp
);
2496 static void gen_neon_dup_u8(TCGv_i32 var
, int shift
)
2498 TCGv_i32 tmp
= tcg_temp_new_i32();
2500 tcg_gen_shri_i32(var
, var
, shift
);
2501 tcg_gen_ext8u_i32(var
, var
);
2502 tcg_gen_shli_i32(tmp
, var
, 8);
2503 tcg_gen_or_i32(var
, var
, tmp
);
2504 tcg_gen_shli_i32(tmp
, var
, 16);
2505 tcg_gen_or_i32(var
, var
, tmp
);
2506 tcg_temp_free_i32(tmp
);
2509 static void gen_neon_dup_low16(TCGv_i32 var
)
2511 TCGv_i32 tmp
= tcg_temp_new_i32();
2512 tcg_gen_ext16u_i32(var
, var
);
2513 tcg_gen_shli_i32(tmp
, var
, 16);
2514 tcg_gen_or_i32(var
, var
, tmp
);
2515 tcg_temp_free_i32(tmp
);
2518 static void gen_neon_dup_high16(TCGv_i32 var
)
2520 TCGv_i32 tmp
= tcg_temp_new_i32();
2521 tcg_gen_andi_i32(var
, var
, 0xffff0000);
2522 tcg_gen_shri_i32(tmp
, var
, 16);
2523 tcg_gen_or_i32(var
, var
, tmp
);
2524 tcg_temp_free_i32(tmp
);
2527 static TCGv_i32
gen_load_and_replicate(DisasContext
*s
, TCGv_i32 addr
, int size
)
2529 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2530 TCGv_i32 tmp
= tcg_temp_new_i32();
2533 tcg_gen_qemu_ld8u(tmp
, addr
, IS_USER(s
));
2534 gen_neon_dup_u8(tmp
, 0);
2537 tcg_gen_qemu_ld16u(tmp
, addr
, IS_USER(s
));
2538 gen_neon_dup_low16(tmp
);
2541 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
2543 default: /* Avoid compiler warnings. */
2549 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2550 (ie. an undefined instruction). */
2551 static int disas_vfp_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
2553 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
2559 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2562 if (!s
->vfp_enabled
) {
2563 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2564 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2566 rn
= (insn
>> 16) & 0xf;
2567 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2568 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2571 dp
= ((insn
& 0xf00) == 0xb00);
2572 switch ((insn
>> 24) & 0xf) {
2574 if (insn
& (1 << 4)) {
2575 /* single register transfer */
2576 rd
= (insn
>> 12) & 0xf;
2581 VFP_DREG_N(rn
, insn
);
2584 if (insn
& 0x00c00060
2585 && !arm_feature(env
, ARM_FEATURE_NEON
))
2588 pass
= (insn
>> 21) & 1;
2589 if (insn
& (1 << 22)) {
2591 offset
= ((insn
>> 5) & 3) * 8;
2592 } else if (insn
& (1 << 5)) {
2594 offset
= (insn
& (1 << 6)) ? 16 : 0;
2599 if (insn
& ARM_CP_RW_BIT
) {
2601 tmp
= neon_load_reg(rn
, pass
);
2605 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2606 if (insn
& (1 << 23))
2612 if (insn
& (1 << 23)) {
2614 tcg_gen_shri_i32(tmp
, tmp
, 16);
2620 tcg_gen_sari_i32(tmp
, tmp
, 16);
2629 store_reg(s
, rd
, tmp
);
2632 tmp
= load_reg(s
, rd
);
2633 if (insn
& (1 << 23)) {
2636 gen_neon_dup_u8(tmp
, 0);
2637 } else if (size
== 1) {
2638 gen_neon_dup_low16(tmp
);
2640 for (n
= 0; n
<= pass
* 2; n
++) {
2641 tmp2
= tcg_temp_new_i32();
2642 tcg_gen_mov_i32(tmp2
, tmp
);
2643 neon_store_reg(rn
, n
, tmp2
);
2645 neon_store_reg(rn
, n
, tmp
);
2650 tmp2
= neon_load_reg(rn
, pass
);
2651 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
2652 tcg_temp_free_i32(tmp2
);
2655 tmp2
= neon_load_reg(rn
, pass
);
2656 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
2657 tcg_temp_free_i32(tmp2
);
2662 neon_store_reg(rn
, pass
, tmp
);
2666 if ((insn
& 0x6f) != 0x00)
2668 rn
= VFP_SREG_N(insn
);
2669 if (insn
& ARM_CP_RW_BIT
) {
2671 if (insn
& (1 << 21)) {
2672 /* system register */
2677 /* VFP2 allows access to FSID from userspace.
2678 VFP3 restricts all id registers to privileged
2681 && arm_feature(env
, ARM_FEATURE_VFP3
))
2683 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2688 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2690 case ARM_VFP_FPINST
:
2691 case ARM_VFP_FPINST2
:
2692 /* Not present in VFP3. */
2694 || arm_feature(env
, ARM_FEATURE_VFP3
))
2696 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2700 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2701 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2703 tmp
= tcg_temp_new_i32();
2704 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2710 || !arm_feature(env
, ARM_FEATURE_MVFR
))
2712 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2718 gen_mov_F0_vreg(0, rn
);
2719 tmp
= gen_vfp_mrs();
2722 /* Set the 4 flag bits in the CPSR. */
2724 tcg_temp_free_i32(tmp
);
2726 store_reg(s
, rd
, tmp
);
2730 if (insn
& (1 << 21)) {
2732 /* system register */
2737 /* Writes are ignored. */
2740 tmp
= load_reg(s
, rd
);
2741 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2742 tcg_temp_free_i32(tmp
);
2748 /* TODO: VFP subarchitecture support.
2749 * For now, keep the EN bit only */
2750 tmp
= load_reg(s
, rd
);
2751 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2752 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2755 case ARM_VFP_FPINST
:
2756 case ARM_VFP_FPINST2
:
2757 tmp
= load_reg(s
, rd
);
2758 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2764 tmp
= load_reg(s
, rd
);
2766 gen_mov_vreg_F0(0, rn
);
2771 /* data processing */
2772 /* The opcode is in bits 23, 21, 20 and 6. */
2773 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2777 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2779 /* rn is register number */
2780 VFP_DREG_N(rn
, insn
);
2783 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2784 /* Integer or single precision destination. */
2785 rd
= VFP_SREG_D(insn
);
2787 VFP_DREG_D(rd
, insn
);
2790 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2791 /* VCVT from int is always from S reg regardless of dp bit.
2792 * VCVT with immediate frac_bits has same format as SREG_M
2794 rm
= VFP_SREG_M(insn
);
2796 VFP_DREG_M(rm
, insn
);
2799 rn
= VFP_SREG_N(insn
);
2800 if (op
== 15 && rn
== 15) {
2801 /* Double precision destination. */
2802 VFP_DREG_D(rd
, insn
);
2804 rd
= VFP_SREG_D(insn
);
2806 /* NB that we implicitly rely on the encoding for the frac_bits
2807 * in VCVT of fixed to float being the same as that of an SREG_M
2809 rm
= VFP_SREG_M(insn
);
2812 veclen
= s
->vec_len
;
2813 if (op
== 15 && rn
> 3)
2816 /* Shut up compiler warnings. */
2827 /* Figure out what type of vector operation this is. */
2828 if ((rd
& bank_mask
) == 0) {
2833 delta_d
= (s
->vec_stride
>> 1) + 1;
2835 delta_d
= s
->vec_stride
+ 1;
2837 if ((rm
& bank_mask
) == 0) {
2838 /* mixed scalar/vector */
2847 /* Load the initial operands. */
2852 /* Integer source */
2853 gen_mov_F0_vreg(0, rm
);
2858 gen_mov_F0_vreg(dp
, rd
);
2859 gen_mov_F1_vreg(dp
, rm
);
2863 /* Compare with zero */
2864 gen_mov_F0_vreg(dp
, rd
);
2875 /* Source and destination the same. */
2876 gen_mov_F0_vreg(dp
, rd
);
2882 /* VCVTB, VCVTT: only present with the halfprec extension,
2883 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2885 if (dp
|| !arm_feature(env
, ARM_FEATURE_VFP_FP16
)) {
2888 /* Otherwise fall through */
2890 /* One source operand. */
2891 gen_mov_F0_vreg(dp
, rm
);
2895 /* Two source operands. */
2896 gen_mov_F0_vreg(dp
, rn
);
2897 gen_mov_F1_vreg(dp
, rm
);
2901 /* Perform the calculation. */
2903 case 0: /* VMLA: fd + (fn * fm) */
2904 /* Note that order of inputs to the add matters for NaNs */
2906 gen_mov_F0_vreg(dp
, rd
);
2909 case 1: /* VMLS: fd + -(fn * fm) */
2912 gen_mov_F0_vreg(dp
, rd
);
2915 case 2: /* VNMLS: -fd + (fn * fm) */
2916 /* Note that it isn't valid to replace (-A + B) with (B - A)
2917 * or similar plausible looking simplifications
2918 * because this will give wrong results for NaNs.
2921 gen_mov_F0_vreg(dp
, rd
);
2925 case 3: /* VNMLA: -fd + -(fn * fm) */
2928 gen_mov_F0_vreg(dp
, rd
);
2932 case 4: /* mul: fn * fm */
2935 case 5: /* nmul: -(fn * fm) */
2939 case 6: /* add: fn + fm */
2942 case 7: /* sub: fn - fm */
2945 case 8: /* div: fn / fm */
2948 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2949 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2950 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2951 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2952 /* These are fused multiply-add, and must be done as one
2953 * floating point operation with no rounding between the
2954 * multiplication and addition steps.
2955 * NB that doing the negations here as separate steps is
2956 * correct : an input NaN should come out with its sign bit
2957 * flipped if it is a negated-input.
2959 if (!arm_feature(env
, ARM_FEATURE_VFP4
)) {
2967 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
2969 frd
= tcg_temp_new_i64();
2970 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
2973 gen_helper_vfp_negd(frd
, frd
);
2975 fpst
= get_fpstatus_ptr(0);
2976 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
2977 cpu_F1d
, frd
, fpst
);
2978 tcg_temp_free_ptr(fpst
);
2979 tcg_temp_free_i64(frd
);
2985 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
2987 frd
= tcg_temp_new_i32();
2988 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
2990 gen_helper_vfp_negs(frd
, frd
);
2992 fpst
= get_fpstatus_ptr(0);
2993 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
2994 cpu_F1s
, frd
, fpst
);
2995 tcg_temp_free_ptr(fpst
);
2996 tcg_temp_free_i32(frd
);
2999 case 14: /* fconst */
3000 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3003 n
= (insn
<< 12) & 0x80000000;
3004 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3011 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3018 tcg_gen_movi_i32(cpu_F0s
, n
);
3021 case 15: /* extension space */
3035 case 4: /* vcvtb.f32.f16 */
3036 tmp
= gen_vfp_mrs();
3037 tcg_gen_ext16u_i32(tmp
, tmp
);
3038 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3039 tcg_temp_free_i32(tmp
);
3041 case 5: /* vcvtt.f32.f16 */
3042 tmp
= gen_vfp_mrs();
3043 tcg_gen_shri_i32(tmp
, tmp
, 16);
3044 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3045 tcg_temp_free_i32(tmp
);
3047 case 6: /* vcvtb.f16.f32 */
3048 tmp
= tcg_temp_new_i32();
3049 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3050 gen_mov_F0_vreg(0, rd
);
3051 tmp2
= gen_vfp_mrs();
3052 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3053 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3054 tcg_temp_free_i32(tmp2
);
3057 case 7: /* vcvtt.f16.f32 */
3058 tmp
= tcg_temp_new_i32();
3059 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3060 tcg_gen_shli_i32(tmp
, tmp
, 16);
3061 gen_mov_F0_vreg(0, rd
);
3062 tmp2
= gen_vfp_mrs();
3063 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3064 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3065 tcg_temp_free_i32(tmp2
);
3077 case 11: /* cmpez */
3081 case 15: /* single<->double conversion */
3083 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3085 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3087 case 16: /* fuito */
3088 gen_vfp_uito(dp
, 0);
3090 case 17: /* fsito */
3091 gen_vfp_sito(dp
, 0);
3093 case 20: /* fshto */
3094 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3096 gen_vfp_shto(dp
, 16 - rm
, 0);
3098 case 21: /* fslto */
3099 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3101 gen_vfp_slto(dp
, 32 - rm
, 0);
3103 case 22: /* fuhto */
3104 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3106 gen_vfp_uhto(dp
, 16 - rm
, 0);
3108 case 23: /* fulto */
3109 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3111 gen_vfp_ulto(dp
, 32 - rm
, 0);
3113 case 24: /* ftoui */
3114 gen_vfp_toui(dp
, 0);
3116 case 25: /* ftouiz */
3117 gen_vfp_touiz(dp
, 0);
3119 case 26: /* ftosi */
3120 gen_vfp_tosi(dp
, 0);
3122 case 27: /* ftosiz */
3123 gen_vfp_tosiz(dp
, 0);
3125 case 28: /* ftosh */
3126 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3128 gen_vfp_tosh(dp
, 16 - rm
, 0);
3130 case 29: /* ftosl */
3131 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3133 gen_vfp_tosl(dp
, 32 - rm
, 0);
3135 case 30: /* ftouh */
3136 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3138 gen_vfp_touh(dp
, 16 - rm
, 0);
3140 case 31: /* ftoul */
3141 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3143 gen_vfp_toul(dp
, 32 - rm
, 0);
3145 default: /* undefined */
3149 default: /* undefined */
3153 /* Write back the result. */
3154 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3155 ; /* Comparison, do nothing. */
3156 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3157 /* VCVT double to int: always integer result. */
3158 gen_mov_vreg_F0(0, rd
);
3159 else if (op
== 15 && rn
== 15)
3161 gen_mov_vreg_F0(!dp
, rd
);
3163 gen_mov_vreg_F0(dp
, rd
);
3165 /* break out of the loop if we have finished */
3169 if (op
== 15 && delta_m
== 0) {
3170 /* single source one-many */
3172 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3174 gen_mov_vreg_F0(dp
, rd
);
3178 /* Setup the next operands. */
3180 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3184 /* One source operand. */
3185 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3187 gen_mov_F0_vreg(dp
, rm
);
3189 /* Two source operands. */
3190 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3192 gen_mov_F0_vreg(dp
, rn
);
3194 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3196 gen_mov_F1_vreg(dp
, rm
);
3204 if ((insn
& 0x03e00000) == 0x00400000) {
3205 /* two-register transfer */
3206 rn
= (insn
>> 16) & 0xf;
3207 rd
= (insn
>> 12) & 0xf;
3209 VFP_DREG_M(rm
, insn
);
3211 rm
= VFP_SREG_M(insn
);
3214 if (insn
& ARM_CP_RW_BIT
) {
3217 gen_mov_F0_vreg(0, rm
* 2);
3218 tmp
= gen_vfp_mrs();
3219 store_reg(s
, rd
, tmp
);
3220 gen_mov_F0_vreg(0, rm
* 2 + 1);
3221 tmp
= gen_vfp_mrs();
3222 store_reg(s
, rn
, tmp
);
3224 gen_mov_F0_vreg(0, rm
);
3225 tmp
= gen_vfp_mrs();
3226 store_reg(s
, rd
, tmp
);
3227 gen_mov_F0_vreg(0, rm
+ 1);
3228 tmp
= gen_vfp_mrs();
3229 store_reg(s
, rn
, tmp
);
3234 tmp
= load_reg(s
, rd
);
3236 gen_mov_vreg_F0(0, rm
* 2);
3237 tmp
= load_reg(s
, rn
);
3239 gen_mov_vreg_F0(0, rm
* 2 + 1);
3241 tmp
= load_reg(s
, rd
);
3243 gen_mov_vreg_F0(0, rm
);
3244 tmp
= load_reg(s
, rn
);
3246 gen_mov_vreg_F0(0, rm
+ 1);
3251 rn
= (insn
>> 16) & 0xf;
3253 VFP_DREG_D(rd
, insn
);
3255 rd
= VFP_SREG_D(insn
);
3256 if ((insn
& 0x01200000) == 0x01000000) {
3257 /* Single load/store */
3258 offset
= (insn
& 0xff) << 2;
3259 if ((insn
& (1 << 23)) == 0)
3261 if (s
->thumb
&& rn
== 15) {
3262 /* This is actually UNPREDICTABLE */
3263 addr
= tcg_temp_new_i32();
3264 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3266 addr
= load_reg(s
, rn
);
3268 tcg_gen_addi_i32(addr
, addr
, offset
);
3269 if (insn
& (1 << 20)) {
3270 gen_vfp_ld(s
, dp
, addr
);
3271 gen_mov_vreg_F0(dp
, rd
);
3273 gen_mov_F0_vreg(dp
, rd
);
3274 gen_vfp_st(s
, dp
, addr
);
3276 tcg_temp_free_i32(addr
);
3278 /* load/store multiple */
3279 int w
= insn
& (1 << 21);
3281 n
= (insn
>> 1) & 0x7f;
3285 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3286 /* P == U , W == 1 => UNDEF */
3289 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3290 /* UNPREDICTABLE cases for bad immediates: we choose to
3291 * UNDEF to avoid generating huge numbers of TCG ops
3295 if (rn
== 15 && w
) {
3296 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3300 if (s
->thumb
&& rn
== 15) {
3301 /* This is actually UNPREDICTABLE */
3302 addr
= tcg_temp_new_i32();
3303 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3305 addr
= load_reg(s
, rn
);
3307 if (insn
& (1 << 24)) /* pre-decrement */
3308 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3314 for (i
= 0; i
< n
; i
++) {
3315 if (insn
& ARM_CP_RW_BIT
) {
3317 gen_vfp_ld(s
, dp
, addr
);
3318 gen_mov_vreg_F0(dp
, rd
+ i
);
3321 gen_mov_F0_vreg(dp
, rd
+ i
);
3322 gen_vfp_st(s
, dp
, addr
);
3324 tcg_gen_addi_i32(addr
, addr
, offset
);
3328 if (insn
& (1 << 24))
3329 offset
= -offset
* n
;
3330 else if (dp
&& (insn
& 1))
3336 tcg_gen_addi_i32(addr
, addr
, offset
);
3337 store_reg(s
, rn
, addr
);
3339 tcg_temp_free_i32(addr
);
3345 /* Should never happen. */
3351 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
3353 TranslationBlock
*tb
;
3356 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
3358 gen_set_pc_im(dest
);
3359 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
3361 gen_set_pc_im(dest
);
3366 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
3368 if (unlikely(s
->singlestep_enabled
)) {
3369 /* An indirect jump so that we still trigger the debug exception. */
3374 gen_goto_tb(s
, 0, dest
);
3375 s
->is_jmp
= DISAS_TB_JUMP
;
3379 static inline void gen_mulxy(TCGv_i32 t0
, TCGv_i32 t1
, int x
, int y
)
3382 tcg_gen_sari_i32(t0
, t0
, 16);
3386 tcg_gen_sari_i32(t1
, t1
, 16);
3389 tcg_gen_mul_i32(t0
, t0
, t1
);
3392 /* Return the mask of PSR bits set by a MSR instruction. */
3393 static uint32_t msr_mask(CPUARMState
*env
, DisasContext
*s
, int flags
, int spsr
) {
3397 if (flags
& (1 << 0))
3399 if (flags
& (1 << 1))
3401 if (flags
& (1 << 2))
3403 if (flags
& (1 << 3))
3406 /* Mask out undefined bits. */
3407 mask
&= ~CPSR_RESERVED
;
3408 if (!arm_feature(env
, ARM_FEATURE_V4T
))
3410 if (!arm_feature(env
, ARM_FEATURE_V5
))
3411 mask
&= ~CPSR_Q
; /* V5TE in reality*/
3412 if (!arm_feature(env
, ARM_FEATURE_V6
))
3413 mask
&= ~(CPSR_E
| CPSR_GE
);
3414 if (!arm_feature(env
, ARM_FEATURE_THUMB2
))
3416 /* Mask out execution state bits. */
3419 /* Mask out privileged bits. */
3425 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3426 static int gen_set_psr(DisasContext
*s
, uint32_t mask
, int spsr
, TCGv_i32 t0
)
3430 /* ??? This is also undefined in system mode. */
3434 tmp
= load_cpu_field(spsr
);
3435 tcg_gen_andi_i32(tmp
, tmp
, ~mask
);
3436 tcg_gen_andi_i32(t0
, t0
, mask
);
3437 tcg_gen_or_i32(tmp
, tmp
, t0
);
3438 store_cpu_field(tmp
, spsr
);
3440 gen_set_cpsr(t0
, mask
);
3442 tcg_temp_free_i32(t0
);
3447 /* Returns nonzero if access to the PSR is not permitted. */
3448 static int gen_set_psr_im(DisasContext
*s
, uint32_t mask
, int spsr
, uint32_t val
)
3451 tmp
= tcg_temp_new_i32();
3452 tcg_gen_movi_i32(tmp
, val
);
3453 return gen_set_psr(s
, mask
, spsr
, tmp
);
3456 /* Generate an old-style exception return. Marks pc as dead. */
3457 static void gen_exception_return(DisasContext
*s
, TCGv_i32 pc
)
3460 store_reg(s
, 15, pc
);
3461 tmp
= load_cpu_field(spsr
);
3462 gen_set_cpsr(tmp
, 0xffffffff);
3463 tcg_temp_free_i32(tmp
);
3464 s
->is_jmp
= DISAS_UPDATE
;
3467 /* Generate a v6 exception return. Marks both values as dead. */
3468 static void gen_rfe(DisasContext
*s
, TCGv_i32 pc
, TCGv_i32 cpsr
)
3470 gen_set_cpsr(cpsr
, 0xffffffff);
3471 tcg_temp_free_i32(cpsr
);
3472 store_reg(s
, 15, pc
);
3473 s
->is_jmp
= DISAS_UPDATE
;
3477 gen_set_condexec (DisasContext
*s
)
3479 if (s
->condexec_mask
) {
3480 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
3481 TCGv_i32 tmp
= tcg_temp_new_i32();
3482 tcg_gen_movi_i32(tmp
, val
);
3483 store_cpu_field(tmp
, condexec_bits
);
3487 static void gen_exception_insn(DisasContext
*s
, int offset
, int excp
)
3489 gen_set_condexec(s
);
3490 gen_set_pc_im(s
->pc
- offset
);
3491 gen_exception(excp
);
3492 s
->is_jmp
= DISAS_JUMP
;
3495 static void gen_nop_hint(DisasContext
*s
, int val
)
3499 gen_set_pc_im(s
->pc
);
3500 s
->is_jmp
= DISAS_WFI
;
3505 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3511 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3513 static inline void gen_neon_add(int size
, TCGv_i32 t0
, TCGv_i32 t1
)
3516 case 0: gen_helper_neon_add_u8(t0
, t0
, t1
); break;
3517 case 1: gen_helper_neon_add_u16(t0
, t0
, t1
); break;
3518 case 2: tcg_gen_add_i32(t0
, t0
, t1
); break;
3523 static inline void gen_neon_rsb(int size
, TCGv_i32 t0
, TCGv_i32 t1
)
3526 case 0: gen_helper_neon_sub_u8(t0
, t1
, t0
); break;
3527 case 1: gen_helper_neon_sub_u16(t0
, t1
, t0
); break;
3528 case 2: tcg_gen_sub_i32(t0
, t1
, t0
); break;
3533 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3534 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3535 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3536 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3537 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3539 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3540 switch ((size << 1) | u) { \
3542 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3545 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3548 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3551 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3554 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3557 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3559 default: return 1; \
3562 #define GEN_NEON_INTEGER_OP(name) do { \
3563 switch ((size << 1) | u) { \
3565 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3568 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3571 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3574 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3577 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3580 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3582 default: return 1; \
3585 static TCGv_i32
neon_load_scratch(int scratch
)
3587 TCGv_i32 tmp
= tcg_temp_new_i32();
3588 tcg_gen_ld_i32(tmp
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3592 static void neon_store_scratch(int scratch
, TCGv_i32 var
)
3594 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3595 tcg_temp_free_i32(var
);
3598 static inline TCGv_i32
neon_get_scalar(int size
, int reg
)
3602 tmp
= neon_load_reg(reg
& 7, reg
>> 4);
3604 gen_neon_dup_high16(tmp
);
3606 gen_neon_dup_low16(tmp
);
3609 tmp
= neon_load_reg(reg
& 15, reg
>> 4);
3614 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
3617 if (!q
&& size
== 2) {
3620 tmp
= tcg_const_i32(rd
);
3621 tmp2
= tcg_const_i32(rm
);
3625 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
3628 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
3631 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
3639 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
3642 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
3648 tcg_temp_free_i32(tmp
);
3649 tcg_temp_free_i32(tmp2
);
3653 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
3656 if (!q
&& size
== 2) {
3659 tmp
= tcg_const_i32(rd
);
3660 tmp2
= tcg_const_i32(rm
);
3664 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
3667 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
3670 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
3678 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
3681 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
3687 tcg_temp_free_i32(tmp
);
3688 tcg_temp_free_i32(tmp2
);
3692 static void gen_neon_trn_u8(TCGv_i32 t0
, TCGv_i32 t1
)
3696 rd
= tcg_temp_new_i32();
3697 tmp
= tcg_temp_new_i32();
3699 tcg_gen_shli_i32(rd
, t0
, 8);
3700 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
3701 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
3702 tcg_gen_or_i32(rd
, rd
, tmp
);
3704 tcg_gen_shri_i32(t1
, t1
, 8);
3705 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
3706 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
3707 tcg_gen_or_i32(t1
, t1
, tmp
);
3708 tcg_gen_mov_i32(t0
, rd
);
3710 tcg_temp_free_i32(tmp
);
3711 tcg_temp_free_i32(rd
);
3714 static void gen_neon_trn_u16(TCGv_i32 t0
, TCGv_i32 t1
)
3718 rd
= tcg_temp_new_i32();
3719 tmp
= tcg_temp_new_i32();
3721 tcg_gen_shli_i32(rd
, t0
, 16);
3722 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
3723 tcg_gen_or_i32(rd
, rd
, tmp
);
3724 tcg_gen_shri_i32(t1
, t1
, 16);
3725 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
3726 tcg_gen_or_i32(t1
, t1
, tmp
);
3727 tcg_gen_mov_i32(t0
, rd
);
3729 tcg_temp_free_i32(tmp
);
3730 tcg_temp_free_i32(rd
);
3738 } neon_ls_element_type
[11] = {
3752 /* Translate a NEON load/store element instruction. Return nonzero if the
3753 instruction is invalid. */
3754 static int disas_neon_ls_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
3773 if (!s
->vfp_enabled
)
3775 VFP_DREG_D(rd
, insn
);
3776 rn
= (insn
>> 16) & 0xf;
3778 load
= (insn
& (1 << 21)) != 0;
3779 if ((insn
& (1 << 23)) == 0) {
3780 /* Load store all elements. */
3781 op
= (insn
>> 8) & 0xf;
3782 size
= (insn
>> 6) & 3;
3785 /* Catch UNDEF cases for bad values of align field */
3788 if (((insn
>> 5) & 1) == 1) {
3793 if (((insn
>> 4) & 3) == 3) {
3800 nregs
= neon_ls_element_type
[op
].nregs
;
3801 interleave
= neon_ls_element_type
[op
].interleave
;
3802 spacing
= neon_ls_element_type
[op
].spacing
;
3803 if (size
== 3 && (interleave
| spacing
) != 1)
3805 addr
= tcg_temp_new_i32();
3806 load_reg_var(s
, addr
, rn
);
3807 stride
= (1 << size
) * interleave
;
3808 for (reg
= 0; reg
< nregs
; reg
++) {
3809 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3810 load_reg_var(s
, addr
, rn
);
3811 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3812 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3813 load_reg_var(s
, addr
, rn
);
3814 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3817 tmp64
= tcg_temp_new_i64();
3819 tcg_gen_qemu_ld64(tmp64
, addr
, IS_USER(s
));
3820 neon_store_reg64(tmp64
, rd
);
3822 neon_load_reg64(tmp64
, rd
);
3823 tcg_gen_qemu_st64(tmp64
, addr
, IS_USER(s
));
3825 tcg_temp_free_i64(tmp64
);
3826 tcg_gen_addi_i32(addr
, addr
, stride
);
3828 for (pass
= 0; pass
< 2; pass
++) {
3831 tmp
= tcg_temp_new_i32();
3832 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
3833 neon_store_reg(rd
, pass
, tmp
);
3835 tmp
= neon_load_reg(rd
, pass
);
3836 tcg_gen_qemu_st32(tmp
, addr
, IS_USER(s
));
3837 tcg_temp_free_i32(tmp
);
3839 tcg_gen_addi_i32(addr
, addr
, stride
);
3840 } else if (size
== 1) {
3842 tmp
= tcg_temp_new_i32();
3843 tcg_gen_qemu_ld16u(tmp
, addr
, IS_USER(s
));
3844 tcg_gen_addi_i32(addr
, addr
, stride
);
3845 tmp2
= tcg_temp_new_i32();
3846 tcg_gen_qemu_ld16u(tmp2
, addr
, IS_USER(s
));
3847 tcg_gen_addi_i32(addr
, addr
, stride
);
3848 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3849 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3850 tcg_temp_free_i32(tmp2
);
3851 neon_store_reg(rd
, pass
, tmp
);
3853 tmp
= neon_load_reg(rd
, pass
);
3854 tmp2
= tcg_temp_new_i32();
3855 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3856 tcg_gen_qemu_st16(tmp
, addr
, IS_USER(s
));
3857 tcg_temp_free_i32(tmp
);
3858 tcg_gen_addi_i32(addr
, addr
, stride
);
3859 tcg_gen_qemu_st16(tmp2
, addr
, IS_USER(s
));
3860 tcg_temp_free_i32(tmp2
);
3861 tcg_gen_addi_i32(addr
, addr
, stride
);
3863 } else /* size == 0 */ {
3865 TCGV_UNUSED_I32(tmp2
);
3866 for (n
= 0; n
< 4; n
++) {
3867 tmp
= tcg_temp_new_i32();
3868 tcg_gen_qemu_ld8u(tmp
, addr
, IS_USER(s
));
3869 tcg_gen_addi_i32(addr
, addr
, stride
);
3873 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3874 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3875 tcg_temp_free_i32(tmp
);
3878 neon_store_reg(rd
, pass
, tmp2
);
3880 tmp2
= neon_load_reg(rd
, pass
);
3881 for (n
= 0; n
< 4; n
++) {
3882 tmp
= tcg_temp_new_i32();
3884 tcg_gen_mov_i32(tmp
, tmp2
);
3886 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3888 tcg_gen_qemu_st8(tmp
, addr
, IS_USER(s
));
3889 tcg_temp_free_i32(tmp
);
3890 tcg_gen_addi_i32(addr
, addr
, stride
);
3892 tcg_temp_free_i32(tmp2
);
3899 tcg_temp_free_i32(addr
);
3902 size
= (insn
>> 10) & 3;
3904 /* Load single element to all lanes. */
3905 int a
= (insn
>> 4) & 1;
3909 size
= (insn
>> 6) & 3;
3910 nregs
= ((insn
>> 8) & 3) + 1;
3913 if (nregs
!= 4 || a
== 0) {
3916 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3919 if (nregs
== 1 && a
== 1 && size
== 0) {
3922 if (nregs
== 3 && a
== 1) {
3925 addr
= tcg_temp_new_i32();
3926 load_reg_var(s
, addr
, rn
);
3928 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3929 tmp
= gen_load_and_replicate(s
, addr
, size
);
3930 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3931 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3932 if (insn
& (1 << 5)) {
3933 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
3934 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
3936 tcg_temp_free_i32(tmp
);
3938 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3939 stride
= (insn
& (1 << 5)) ? 2 : 1;
3940 for (reg
= 0; reg
< nregs
; reg
++) {
3941 tmp
= gen_load_and_replicate(s
, addr
, size
);
3942 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3943 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3944 tcg_temp_free_i32(tmp
);
3945 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3949 tcg_temp_free_i32(addr
);
3950 stride
= (1 << size
) * nregs
;
3952 /* Single element. */
3953 int idx
= (insn
>> 4) & 0xf;
3954 pass
= (insn
>> 7) & 1;
3957 shift
= ((insn
>> 5) & 3) * 8;
3961 shift
= ((insn
>> 6) & 1) * 16;
3962 stride
= (insn
& (1 << 5)) ? 2 : 1;
3966 stride
= (insn
& (1 << 6)) ? 2 : 1;
3971 nregs
= ((insn
>> 8) & 3) + 1;
3972 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3975 if (((idx
& (1 << size
)) != 0) ||
3976 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
3981 if ((idx
& 1) != 0) {
3986 if (size
== 2 && (idx
& 2) != 0) {
3991 if ((size
== 2) && ((idx
& 3) == 3)) {
3998 if ((rd
+ stride
* (nregs
- 1)) > 31) {
3999 /* Attempts to write off the end of the register file
4000 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4001 * the neon_load_reg() would write off the end of the array.
4005 addr
= tcg_temp_new_i32();
4006 load_reg_var(s
, addr
, rn
);
4007 for (reg
= 0; reg
< nregs
; reg
++) {
4009 tmp
= tcg_temp_new_i32();
4012 tcg_gen_qemu_ld8u(tmp
, addr
, IS_USER(s
));
4015 tcg_gen_qemu_ld16u(tmp
, addr
, IS_USER(s
));
4018 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
4020 default: /* Avoid compiler warnings. */
4024 tmp2
= neon_load_reg(rd
, pass
);
4025 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
4026 shift
, size
? 16 : 8);
4027 tcg_temp_free_i32(tmp2
);
4029 neon_store_reg(rd
, pass
, tmp
);
4030 } else { /* Store */
4031 tmp
= neon_load_reg(rd
, pass
);
4033 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4036 tcg_gen_qemu_st8(tmp
, addr
, IS_USER(s
));
4039 tcg_gen_qemu_st16(tmp
, addr
, IS_USER(s
));
4042 tcg_gen_qemu_st32(tmp
, addr
, IS_USER(s
));
4045 tcg_temp_free_i32(tmp
);
4048 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4050 tcg_temp_free_i32(addr
);
4051 stride
= nregs
* (1 << size
);
4057 base
= load_reg(s
, rn
);
4059 tcg_gen_addi_i32(base
, base
, stride
);
4062 index
= load_reg(s
, rm
);
4063 tcg_gen_add_i32(base
, base
, index
);
4064 tcg_temp_free_i32(index
);
4066 store_reg(s
, rn
, base
);
4071 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4072 static void gen_neon_bsl(TCGv_i32 dest
, TCGv_i32 t
, TCGv_i32 f
, TCGv_i32 c
)
4074 tcg_gen_and_i32(t
, t
, c
);
4075 tcg_gen_andc_i32(f
, f
, c
);
4076 tcg_gen_or_i32(dest
, t
, f
);
4079 static inline void gen_neon_narrow(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4082 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
4083 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
4084 case 2: tcg_gen_trunc_i64_i32(dest
, src
); break;
4089 static inline void gen_neon_narrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4092 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
4093 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
4094 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
4099 static inline void gen_neon_narrow_satu(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4102 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
4103 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
4104 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
4109 static inline void gen_neon_unarrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
4112 case 0: gen_helper_neon_unarrow_sat8(dest
, cpu_env
, src
); break;
4113 case 1: gen_helper_neon_unarrow_sat16(dest
, cpu_env
, src
); break;
4114 case 2: gen_helper_neon_unarrow_sat32(dest
, cpu_env
, src
); break;
4119 static inline void gen_neon_shift_narrow(int size
, TCGv_i32 var
, TCGv_i32 shift
,
4125 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4126 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4131 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4132 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4139 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
4140 case 2: gen_helper_neon_shl_u32(var
, var
, shift
); break;
4145 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4146 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
4153 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv_i32 src
, int size
, int u
)
4157 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
4158 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
4159 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
4164 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
4165 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
4166 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
4170 tcg_temp_free_i32(src
);
4173 static inline void gen_neon_addl(int size
)
4176 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
4177 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
4178 case 2: tcg_gen_add_i64(CPU_V001
); break;
4183 static inline void gen_neon_subl(int size
)
4186 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
4187 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
4188 case 2: tcg_gen_sub_i64(CPU_V001
); break;
4193 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
4196 case 0: gen_helper_neon_negl_u16(var
, var
); break;
4197 case 1: gen_helper_neon_negl_u32(var
, var
); break;
4199 tcg_gen_neg_i64(var
, var
);
4205 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
4208 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
4209 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
4214 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv_i32 a
, TCGv_i32 b
,
4219 switch ((size
<< 1) | u
) {
4220 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
4221 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
4222 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
4223 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
4225 tmp
= gen_muls_i64_i32(a
, b
);
4226 tcg_gen_mov_i64(dest
, tmp
);
4227 tcg_temp_free_i64(tmp
);
4230 tmp
= gen_mulu_i64_i32(a
, b
);
4231 tcg_gen_mov_i64(dest
, tmp
);
4232 tcg_temp_free_i64(tmp
);
4237 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4238 Don't forget to clean them now. */
4240 tcg_temp_free_i32(a
);
4241 tcg_temp_free_i32(b
);
4245 static void gen_neon_narrow_op(int op
, int u
, int size
,
4246 TCGv_i32 dest
, TCGv_i64 src
)
4250 gen_neon_unarrow_sats(size
, dest
, src
);
4252 gen_neon_narrow(size
, dest
, src
);
4256 gen_neon_narrow_satu(size
, dest
, src
);
4258 gen_neon_narrow_sats(size
, dest
, src
);
4263 /* Symbolic constants for op fields for Neon 3-register same-length.
4264 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4267 #define NEON_3R_VHADD 0
4268 #define NEON_3R_VQADD 1
4269 #define NEON_3R_VRHADD 2
4270 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4271 #define NEON_3R_VHSUB 4
4272 #define NEON_3R_VQSUB 5
4273 #define NEON_3R_VCGT 6
4274 #define NEON_3R_VCGE 7
4275 #define NEON_3R_VSHL 8
4276 #define NEON_3R_VQSHL 9
4277 #define NEON_3R_VRSHL 10
4278 #define NEON_3R_VQRSHL 11
4279 #define NEON_3R_VMAX 12
4280 #define NEON_3R_VMIN 13
4281 #define NEON_3R_VABD 14
4282 #define NEON_3R_VABA 15
4283 #define NEON_3R_VADD_VSUB 16
4284 #define NEON_3R_VTST_VCEQ 17
4285 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4286 #define NEON_3R_VMUL 19
4287 #define NEON_3R_VPMAX 20
4288 #define NEON_3R_VPMIN 21
4289 #define NEON_3R_VQDMULH_VQRDMULH 22
4290 #define NEON_3R_VPADD 23
4291 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4292 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4293 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4294 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4295 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4296 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4297 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4299 static const uint8_t neon_3r_sizes
[] = {
4300 [NEON_3R_VHADD
] = 0x7,
4301 [NEON_3R_VQADD
] = 0xf,
4302 [NEON_3R_VRHADD
] = 0x7,
4303 [NEON_3R_LOGIC
] = 0xf, /* size field encodes op type */
4304 [NEON_3R_VHSUB
] = 0x7,
4305 [NEON_3R_VQSUB
] = 0xf,
4306 [NEON_3R_VCGT
] = 0x7,
4307 [NEON_3R_VCGE
] = 0x7,
4308 [NEON_3R_VSHL
] = 0xf,
4309 [NEON_3R_VQSHL
] = 0xf,
4310 [NEON_3R_VRSHL
] = 0xf,
4311 [NEON_3R_VQRSHL
] = 0xf,
4312 [NEON_3R_VMAX
] = 0x7,
4313 [NEON_3R_VMIN
] = 0x7,
4314 [NEON_3R_VABD
] = 0x7,
4315 [NEON_3R_VABA
] = 0x7,
4316 [NEON_3R_VADD_VSUB
] = 0xf,
4317 [NEON_3R_VTST_VCEQ
] = 0x7,
4318 [NEON_3R_VML
] = 0x7,
4319 [NEON_3R_VMUL
] = 0x7,
4320 [NEON_3R_VPMAX
] = 0x7,
4321 [NEON_3R_VPMIN
] = 0x7,
4322 [NEON_3R_VQDMULH_VQRDMULH
] = 0x6,
4323 [NEON_3R_VPADD
] = 0x7,
4324 [NEON_3R_VFM
] = 0x5, /* size bit 1 encodes op */
4325 [NEON_3R_FLOAT_ARITH
] = 0x5, /* size bit 1 encodes op */
4326 [NEON_3R_FLOAT_MULTIPLY
] = 0x5, /* size bit 1 encodes op */
4327 [NEON_3R_FLOAT_CMP
] = 0x5, /* size bit 1 encodes op */
4328 [NEON_3R_FLOAT_ACMP
] = 0x5, /* size bit 1 encodes op */
4329 [NEON_3R_FLOAT_MINMAX
] = 0x5, /* size bit 1 encodes op */
4330 [NEON_3R_VRECPS_VRSQRTS
] = 0x5, /* size bit 1 encodes op */
4333 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4334 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4337 #define NEON_2RM_VREV64 0
4338 #define NEON_2RM_VREV32 1
4339 #define NEON_2RM_VREV16 2
4340 #define NEON_2RM_VPADDL 4
4341 #define NEON_2RM_VPADDL_U 5
4342 #define NEON_2RM_VCLS 8
4343 #define NEON_2RM_VCLZ 9
4344 #define NEON_2RM_VCNT 10
4345 #define NEON_2RM_VMVN 11
4346 #define NEON_2RM_VPADAL 12
4347 #define NEON_2RM_VPADAL_U 13
4348 #define NEON_2RM_VQABS 14
4349 #define NEON_2RM_VQNEG 15
4350 #define NEON_2RM_VCGT0 16
4351 #define NEON_2RM_VCGE0 17
4352 #define NEON_2RM_VCEQ0 18
4353 #define NEON_2RM_VCLE0 19
4354 #define NEON_2RM_VCLT0 20
4355 #define NEON_2RM_VABS 22
4356 #define NEON_2RM_VNEG 23
4357 #define NEON_2RM_VCGT0_F 24
4358 #define NEON_2RM_VCGE0_F 25
4359 #define NEON_2RM_VCEQ0_F 26
4360 #define NEON_2RM_VCLE0_F 27
4361 #define NEON_2RM_VCLT0_F 28
4362 #define NEON_2RM_VABS_F 30
4363 #define NEON_2RM_VNEG_F 31
4364 #define NEON_2RM_VSWP 32
4365 #define NEON_2RM_VTRN 33
4366 #define NEON_2RM_VUZP 34
4367 #define NEON_2RM_VZIP 35
4368 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4369 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4370 #define NEON_2RM_VSHLL 38
4371 #define NEON_2RM_VCVT_F16_F32 44
4372 #define NEON_2RM_VCVT_F32_F16 46
4373 #define NEON_2RM_VRECPE 56
4374 #define NEON_2RM_VRSQRTE 57
4375 #define NEON_2RM_VRECPE_F 58
4376 #define NEON_2RM_VRSQRTE_F 59
4377 #define NEON_2RM_VCVT_FS 60
4378 #define NEON_2RM_VCVT_FU 61
4379 #define NEON_2RM_VCVT_SF 62
4380 #define NEON_2RM_VCVT_UF 63
4382 static int neon_2rm_is_float_op(int op
)
4384 /* Return true if this neon 2reg-misc op is float-to-float */
4385 return (op
== NEON_2RM_VABS_F
|| op
== NEON_2RM_VNEG_F
||
4386 op
>= NEON_2RM_VRECPE_F
);
4389 /* Each entry in this array has bit n set if the insn allows
4390 * size value n (otherwise it will UNDEF). Since unallocated
4391 * op values will have no bits set they always UNDEF.
4393 static const uint8_t neon_2rm_sizes
[] = {
4394 [NEON_2RM_VREV64
] = 0x7,
4395 [NEON_2RM_VREV32
] = 0x3,
4396 [NEON_2RM_VREV16
] = 0x1,
4397 [NEON_2RM_VPADDL
] = 0x7,
4398 [NEON_2RM_VPADDL_U
] = 0x7,
4399 [NEON_2RM_VCLS
] = 0x7,
4400 [NEON_2RM_VCLZ
] = 0x7,
4401 [NEON_2RM_VCNT
] = 0x1,
4402 [NEON_2RM_VMVN
] = 0x1,
4403 [NEON_2RM_VPADAL
] = 0x7,
4404 [NEON_2RM_VPADAL_U
] = 0x7,
4405 [NEON_2RM_VQABS
] = 0x7,
4406 [NEON_2RM_VQNEG
] = 0x7,
4407 [NEON_2RM_VCGT0
] = 0x7,
4408 [NEON_2RM_VCGE0
] = 0x7,
4409 [NEON_2RM_VCEQ0
] = 0x7,
4410 [NEON_2RM_VCLE0
] = 0x7,
4411 [NEON_2RM_VCLT0
] = 0x7,
4412 [NEON_2RM_VABS
] = 0x7,
4413 [NEON_2RM_VNEG
] = 0x7,
4414 [NEON_2RM_VCGT0_F
] = 0x4,
4415 [NEON_2RM_VCGE0_F
] = 0x4,
4416 [NEON_2RM_VCEQ0_F
] = 0x4,
4417 [NEON_2RM_VCLE0_F
] = 0x4,
4418 [NEON_2RM_VCLT0_F
] = 0x4,
4419 [NEON_2RM_VABS_F
] = 0x4,
4420 [NEON_2RM_VNEG_F
] = 0x4,
4421 [NEON_2RM_VSWP
] = 0x1,
4422 [NEON_2RM_VTRN
] = 0x7,
4423 [NEON_2RM_VUZP
] = 0x7,
4424 [NEON_2RM_VZIP
] = 0x7,
4425 [NEON_2RM_VMOVN
] = 0x7,
4426 [NEON_2RM_VQMOVN
] = 0x7,
4427 [NEON_2RM_VSHLL
] = 0x7,
4428 [NEON_2RM_VCVT_F16_F32
] = 0x2,
4429 [NEON_2RM_VCVT_F32_F16
] = 0x2,
4430 [NEON_2RM_VRECPE
] = 0x4,
4431 [NEON_2RM_VRSQRTE
] = 0x4,
4432 [NEON_2RM_VRECPE_F
] = 0x4,
4433 [NEON_2RM_VRSQRTE_F
] = 0x4,
4434 [NEON_2RM_VCVT_FS
] = 0x4,
4435 [NEON_2RM_VCVT_FU
] = 0x4,
4436 [NEON_2RM_VCVT_SF
] = 0x4,
4437 [NEON_2RM_VCVT_UF
] = 0x4,
4440 /* Translate a NEON data processing instruction. Return nonzero if the
4441 instruction is invalid.
4442 We process data in a mixture of 32-bit and 64-bit chunks.
4443 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4445 static int disas_neon_data_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
4457 TCGv_i32 tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
4460 if (!s
->vfp_enabled
)
4462 q
= (insn
& (1 << 6)) != 0;
4463 u
= (insn
>> 24) & 1;
4464 VFP_DREG_D(rd
, insn
);
4465 VFP_DREG_N(rn
, insn
);
4466 VFP_DREG_M(rm
, insn
);
4467 size
= (insn
>> 20) & 3;
4468 if ((insn
& (1 << 23)) == 0) {
4469 /* Three register same length. */
4470 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4471 /* Catch invalid op and bad size combinations: UNDEF */
4472 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4475 /* All insns of this form UNDEF for either this condition or the
4476 * superset of cases "Q==1"; we catch the latter later.
4478 if (q
&& ((rd
| rn
| rm
) & 1)) {
4481 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
4482 /* 64-bit element instructions. */
4483 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4484 neon_load_reg64(cpu_V0
, rn
+ pass
);
4485 neon_load_reg64(cpu_V1
, rm
+ pass
);
4489 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4492 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4498 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4501 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4507 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4509 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4514 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4517 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4523 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4525 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4528 case NEON_3R_VQRSHL
:
4530 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4533 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4537 case NEON_3R_VADD_VSUB
:
4539 tcg_gen_sub_i64(CPU_V001
);
4541 tcg_gen_add_i64(CPU_V001
);
4547 neon_store_reg64(cpu_V0
, rd
+ pass
);
4556 case NEON_3R_VQRSHL
:
4559 /* Shift instruction operands are reversed. */
4574 case NEON_3R_FLOAT_ARITH
:
4575 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4577 case NEON_3R_FLOAT_MINMAX
:
4578 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4580 case NEON_3R_FLOAT_CMP
:
4582 /* no encoding for U=0 C=1x */
4586 case NEON_3R_FLOAT_ACMP
:
4591 case NEON_3R_VRECPS_VRSQRTS
:
4597 if (u
&& (size
!= 0)) {
4598 /* UNDEF on invalid size for polynomial subcase */
4603 if (!arm_feature(env
, ARM_FEATURE_VFP4
) || u
) {
4611 if (pairwise
&& q
) {
4612 /* All the pairwise insns UNDEF if Q is set */
4616 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4621 tmp
= neon_load_reg(rn
, 0);
4622 tmp2
= neon_load_reg(rn
, 1);
4624 tmp
= neon_load_reg(rm
, 0);
4625 tmp2
= neon_load_reg(rm
, 1);
4629 tmp
= neon_load_reg(rn
, pass
);
4630 tmp2
= neon_load_reg(rm
, pass
);
4634 GEN_NEON_INTEGER_OP(hadd
);
4637 GEN_NEON_INTEGER_OP_ENV(qadd
);
4639 case NEON_3R_VRHADD
:
4640 GEN_NEON_INTEGER_OP(rhadd
);
4642 case NEON_3R_LOGIC
: /* Logic ops. */
4643 switch ((u
<< 2) | size
) {
4645 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4648 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4651 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4654 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4657 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4660 tmp3
= neon_load_reg(rd
, pass
);
4661 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4662 tcg_temp_free_i32(tmp3
);
4665 tmp3
= neon_load_reg(rd
, pass
);
4666 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4667 tcg_temp_free_i32(tmp3
);
4670 tmp3
= neon_load_reg(rd
, pass
);
4671 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4672 tcg_temp_free_i32(tmp3
);
4677 GEN_NEON_INTEGER_OP(hsub
);
4680 GEN_NEON_INTEGER_OP_ENV(qsub
);
4683 GEN_NEON_INTEGER_OP(cgt
);
4686 GEN_NEON_INTEGER_OP(cge
);
4689 GEN_NEON_INTEGER_OP(shl
);
4692 GEN_NEON_INTEGER_OP_ENV(qshl
);
4695 GEN_NEON_INTEGER_OP(rshl
);
4697 case NEON_3R_VQRSHL
:
4698 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4701 GEN_NEON_INTEGER_OP(max
);
4704 GEN_NEON_INTEGER_OP(min
);
4707 GEN_NEON_INTEGER_OP(abd
);
4710 GEN_NEON_INTEGER_OP(abd
);
4711 tcg_temp_free_i32(tmp2
);
4712 tmp2
= neon_load_reg(rd
, pass
);
4713 gen_neon_add(size
, tmp
, tmp2
);
4715 case NEON_3R_VADD_VSUB
:
4716 if (!u
) { /* VADD */
4717 gen_neon_add(size
, tmp
, tmp2
);
4720 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4721 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4722 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4727 case NEON_3R_VTST_VCEQ
:
4728 if (!u
) { /* VTST */
4730 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4731 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4732 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4737 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4738 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4739 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4744 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4746 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4747 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4748 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4751 tcg_temp_free_i32(tmp2
);
4752 tmp2
= neon_load_reg(rd
, pass
);
4754 gen_neon_rsb(size
, tmp
, tmp2
);
4756 gen_neon_add(size
, tmp
, tmp2
);
4760 if (u
) { /* polynomial */
4761 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4762 } else { /* Integer */
4764 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4765 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4766 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4772 GEN_NEON_INTEGER_OP(pmax
);
4775 GEN_NEON_INTEGER_OP(pmin
);
4777 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4778 if (!u
) { /* VQDMULH */
4781 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4784 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4788 } else { /* VQRDMULH */
4791 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4794 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4802 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4803 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4804 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4808 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4810 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4811 switch ((u
<< 2) | size
) {
4814 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4817 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
4820 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
4825 tcg_temp_free_ptr(fpstatus
);
4828 case NEON_3R_FLOAT_MULTIPLY
:
4830 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4831 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
4833 tcg_temp_free_i32(tmp2
);
4834 tmp2
= neon_load_reg(rd
, pass
);
4836 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4838 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
4841 tcg_temp_free_ptr(fpstatus
);
4844 case NEON_3R_FLOAT_CMP
:
4846 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4848 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
4851 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4853 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4856 tcg_temp_free_ptr(fpstatus
);
4859 case NEON_3R_FLOAT_ACMP
:
4861 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4863 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4865 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4867 tcg_temp_free_ptr(fpstatus
);
4870 case NEON_3R_FLOAT_MINMAX
:
4872 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4874 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
, fpstatus
);
4876 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
, fpstatus
);
4878 tcg_temp_free_ptr(fpstatus
);
4881 case NEON_3R_VRECPS_VRSQRTS
:
4883 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4885 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4889 /* VFMA, VFMS: fused multiply-add */
4890 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4891 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
4894 gen_helper_vfp_negs(tmp
, tmp
);
4896 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
4897 tcg_temp_free_i32(tmp3
);
4898 tcg_temp_free_ptr(fpstatus
);
4904 tcg_temp_free_i32(tmp2
);
4906 /* Save the result. For elementwise operations we can put it
4907 straight into the destination register. For pairwise operations
4908 we have to be careful to avoid clobbering the source operands. */
4909 if (pairwise
&& rd
== rm
) {
4910 neon_store_scratch(pass
, tmp
);
4912 neon_store_reg(rd
, pass
, tmp
);
4916 if (pairwise
&& rd
== rm
) {
4917 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4918 tmp
= neon_load_scratch(pass
);
4919 neon_store_reg(rd
, pass
, tmp
);
4922 /* End of 3 register same size operations. */
4923 } else if (insn
& (1 << 4)) {
4924 if ((insn
& 0x00380080) != 0) {
4925 /* Two registers and shift. */
4926 op
= (insn
>> 8) & 0xf;
4927 if (insn
& (1 << 7)) {
4935 while ((insn
& (1 << (size
+ 19))) == 0)
4938 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4939 /* To avoid excessive duplication of ops we implement shift
4940 by immediate using the variable shift operations. */
4942 /* Shift by immediate:
4943 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4944 if (q
&& ((rd
| rm
) & 1)) {
4947 if (!u
&& (op
== 4 || op
== 6)) {
4950 /* Right shifts are encoded as N - shift, where N is the
4951 element size in bits. */
4953 shift
= shift
- (1 << (size
+ 3));
4961 imm
= (uint8_t) shift
;
4966 imm
= (uint16_t) shift
;
4977 for (pass
= 0; pass
< count
; pass
++) {
4979 neon_load_reg64(cpu_V0
, rm
+ pass
);
4980 tcg_gen_movi_i64(cpu_V1
, imm
);
4985 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4987 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4992 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4994 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4997 case 5: /* VSHL, VSLI */
4998 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5000 case 6: /* VQSHLU */
5001 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5006 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5009 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5014 if (op
== 1 || op
== 3) {
5016 neon_load_reg64(cpu_V1
, rd
+ pass
);
5017 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5018 } else if (op
== 4 || (op
== 5 && u
)) {
5020 neon_load_reg64(cpu_V1
, rd
+ pass
);
5022 if (shift
< -63 || shift
> 63) {
5026 mask
= 0xffffffffffffffffull
>> -shift
;
5028 mask
= 0xffffffffffffffffull
<< shift
;
5031 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5032 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5034 neon_store_reg64(cpu_V0
, rd
+ pass
);
5035 } else { /* size < 3 */
5036 /* Operands in T0 and T1. */
5037 tmp
= neon_load_reg(rm
, pass
);
5038 tmp2
= tcg_temp_new_i32();
5039 tcg_gen_movi_i32(tmp2
, imm
);
5043 GEN_NEON_INTEGER_OP(shl
);
5047 GEN_NEON_INTEGER_OP(rshl
);
5050 case 5: /* VSHL, VSLI */
5052 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5053 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5054 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5058 case 6: /* VQSHLU */
5061 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5065 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5069 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5077 GEN_NEON_INTEGER_OP_ENV(qshl
);
5080 tcg_temp_free_i32(tmp2
);
5082 if (op
== 1 || op
== 3) {
5084 tmp2
= neon_load_reg(rd
, pass
);
5085 gen_neon_add(size
, tmp
, tmp2
);
5086 tcg_temp_free_i32(tmp2
);
5087 } else if (op
== 4 || (op
== 5 && u
)) {
5092 mask
= 0xff >> -shift
;
5094 mask
= (uint8_t)(0xff << shift
);
5100 mask
= 0xffff >> -shift
;
5102 mask
= (uint16_t)(0xffff << shift
);
5106 if (shift
< -31 || shift
> 31) {
5110 mask
= 0xffffffffu
>> -shift
;
5112 mask
= 0xffffffffu
<< shift
;
5118 tmp2
= neon_load_reg(rd
, pass
);
5119 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5120 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5121 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5122 tcg_temp_free_i32(tmp2
);
5124 neon_store_reg(rd
, pass
, tmp
);
5127 } else if (op
< 10) {
5128 /* Shift by immediate and narrow:
5129 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5130 int input_unsigned
= (op
== 8) ? !u
: u
;
5134 shift
= shift
- (1 << (size
+ 3));
5137 tmp64
= tcg_const_i64(shift
);
5138 neon_load_reg64(cpu_V0
, rm
);
5139 neon_load_reg64(cpu_V1
, rm
+ 1);
5140 for (pass
= 0; pass
< 2; pass
++) {
5148 if (input_unsigned
) {
5149 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5151 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5154 if (input_unsigned
) {
5155 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5157 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5160 tmp
= tcg_temp_new_i32();
5161 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5162 neon_store_reg(rd
, pass
, tmp
);
5164 tcg_temp_free_i64(tmp64
);
5167 imm
= (uint16_t)shift
;
5171 imm
= (uint32_t)shift
;
5173 tmp2
= tcg_const_i32(imm
);
5174 tmp4
= neon_load_reg(rm
+ 1, 0);
5175 tmp5
= neon_load_reg(rm
+ 1, 1);
5176 for (pass
= 0; pass
< 2; pass
++) {
5178 tmp
= neon_load_reg(rm
, 0);
5182 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5185 tmp3
= neon_load_reg(rm
, 1);
5189 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5191 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5192 tcg_temp_free_i32(tmp
);
5193 tcg_temp_free_i32(tmp3
);
5194 tmp
= tcg_temp_new_i32();
5195 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5196 neon_store_reg(rd
, pass
, tmp
);
5198 tcg_temp_free_i32(tmp2
);
5200 } else if (op
== 10) {
5202 if (q
|| (rd
& 1)) {
5205 tmp
= neon_load_reg(rm
, 0);
5206 tmp2
= neon_load_reg(rm
, 1);
5207 for (pass
= 0; pass
< 2; pass
++) {
5211 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else { /* (insn & 0x00380080) == 0 */
                int invert;
                if (q && (rd & 1)) {
                    return 1;
                }

                op = (insn >> 8) & 0xf;
                /* One register and immediate.  */
                imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
                invert = (insn & (1 << 5)) != 0;
                /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
                 * We choose to not special-case this and will behave as if a
                 * valid constant encoding of 0 had been given.
                 */
                switch (op) {
                case 0: case 1:
                    /* no-op */
                    break;
                case 2: case 3:
                    imm <<= 8;
                    break;
                case 4: case 5:
                    imm <<= 16;
                    break;
                case 6: case 7:
                    imm <<= 24;
                    break;
                case 8: case 9:
                    imm |= imm << 16;
                    break;
                case 10: case 11:
                    imm = (imm << 8) | (imm << 24);
                    break;
                case 12:
                    imm = (imm << 8) | 0xff;
                    break;
                case 13:
                    imm = (imm << 16) | 0xffff;
                    break;
                case 14:
                    imm |= (imm << 8) | (imm << 16) | (imm << 24);
                    break;
                case 15:
                    if (invert) {
                        return 1;
                    }
                    imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                          | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                    break;
                }
                if (invert)
                    imm = ~imm;
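                /* The switch above expands the 8-bit immediate field into the
                 * 32-bit constant described by the AdvSIMD modified-immediate
                 * encoding (byte/halfword replication, shifted forms, and the
                 * op=15 float form); the final inversion handles VMVN-style
                 * encodings.
                 */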
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    if (op & 1 && op < 12) {
                        tmp = neon_load_reg(rd, pass);
                        if (invert) {
                            /* The immediate value has already been inverted, so
                               BIC becomes AND.  */
                            tcg_gen_andi_i32(tmp, tmp, imm);
                        } else {
                            tcg_gen_ori_i32(tmp, tmp, imm);
                        }
                    } else {
                        /* VMOV, VMVN.  */
                        tmp = tcg_temp_new_i32();
                        if (op == 14 && invert) {
                            int n;
                            uint32_t val;
                            val = 0;
                            for (n = 0; n < 4; n++) {
                                if (imm & (1 << (n + (pass & 1) * 4)))
                                    val |= 0xff << (n * 8);
                            }
                            tcg_gen_movi_i32(tmp, val);
                        } else {
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    }
                    neon_store_reg(rd, pass, tmp);
                }
            }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size != 0
                 *           bit 1 : UNDEF if size == 0
                 *           bit 2 : UNDEF if U == 1
                 * Note that [1:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 6}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 6}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 2}, /* VQDMULL */
                    {0, 0, 0, 5}, /* Polynomial VMULL */
                    {0, 0, 0, 3}, /* Reserved: always UNDEF */
                };
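                /* Each row of neon_3reg_wide[] describes one opcode: whether
                 * the narrow sources must be widened first, whether each
                 * source operand is already a wide (64-bit-element) register,
                 * and the conditions under which the encoding is UNDEFINED.
                 */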
                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];
                undefreq = neon_3reg_wide[op][3];

                if (((undefreq & 1) && (size != 0)) ||
                    ((undefreq & 2) && (size == 0)) ||
                    ((undefreq & 4) && u)) {
                    return 1;
                }
                if ((src1_wide && (rn & 1)) ||
                    (src2_wide && (rm & 1)) ||
                    (!src2_wide && (rd & 1))) {
                    return 1;
                }
                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                TCGV_UNUSED_I32(tmp3);
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        TCGV_UNUSED_I32(tmp);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        TCGV_UNUSED_I32(tmp2);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    default: /* 15 is RESERVED: caught earlier  */
                        abort();
                    }
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar. NB that for ops of this form
                 * the ARM ARM labels bit 24 as Q, but it is in our variable
                 * 'u', not 'q'.
                 */
                if (size == 0) {
                    return 1;
                }
                switch (op) {
                case 1: /* Float VMLA scalar */
                case 5: /* Float VMLS scalar */
                case 9: /* Float VMUL scalar */
                    if (size == 1) {
                        return 1;
                    }
                    /* fall through */
                case 0: /* Integer VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            default:
                                abort();
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 3: /* VQDMLAL scalar */
                case 7: /* VQDMLSL scalar */
                case 11: /* VQDMULL scalar */
                    if (u == 1) {
                        return 1;
                    }
                    /* fall through */
                case 2: /* VMLAL scalar */
                case 6: /* VMLSL scalar */
                case 10: /* VMULL scalar */
                    if (rd & 1) {
                        return 1;
                    }
                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract (VEXT).  */
                imm = (insn >> 8) & 0xf;
                if (imm > 7 && !q) {
                    return 1;
                }
                if (q && ((rd | rn | rm) & 1)) {
                    return 1;
                }
                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
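                /* VEXT is implemented above as a 64-bit funnel shift: each
                 * result doubleword is the low source shifted right by imm*8
                 * bits ORed with the high source shifted left by the
                 * remaining bits.
                 */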
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
                /* UNDEF for unknown op values and bad op-size combinations */
                if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
                    return 1;
                }
                if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
                    q && ((rm | rd) & 1)) {
                    return 1;
                }
                switch (op) {
                case NEON_2RM_VREV64:
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2 + 1, tmp);
                        if (size == 2) {
                            neon_store_reg(rd, pass * 2, tmp2);
                        } else {
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                            case 1: gen_swap_half(tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rd, pass * 2, tmp2);
                        }
                    }
                    break;
                case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
                case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= NEON_2RM_VPADAL) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case NEON_2RM_VTRN:
                    if (size == 2) {
                        int n;
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
                            tmp = neon_load_reg(rm, n);
                            tmp2 = neon_load_reg(rd, n + 1);
                            neon_store_reg(rm, n, tmp2);
                            neon_store_reg(rd, n + 1, tmp);
                        }
                    } else {
                        goto elementwise;
                    }
                    break;
                case NEON_2RM_VUZP:
                    if (gen_neon_unzip(rd, rm, size, q)) {
                        return 1;
                    }
                    break;
                case NEON_2RM_VZIP:
                    if (gen_neon_zip(rd, rm, size, q)) {
                        return 1;
                    }
                    break;
                case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
                    /* also VQMOVUN; op field and mnemonics don't line up */
                    if (rm & 1) {
                        return 1;
                    }
                    TCGV_UNUSED_I32(tmp2);
                    for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
                                           tmp, cpu_V0);
                        if (pass == 0) {
                            tmp2 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp2);
                            neon_store_reg(rd, 1, tmp);
                        }
                    }
                    break;
                case NEON_2RM_VSHLL:
                    if (q || (rd & 1)) {
                        return 1;
                    }
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 1)
                            tmp = tmp2;
                        gen_neon_widen(cpu_V0, tmp, size, 1);
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case NEON_2RM_VCVT_F16_F32:
                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
                        q || (rm & 1)) {
                        return 1;
                    }
                    tmp = tcg_temp_new_i32();
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                    gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                    gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                    gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                    neon_store_reg(rd, 0, tmp2);
                    tmp2 = tcg_temp_new_i32();
                    gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    neon_store_reg(rd, 1, tmp2);
                    tcg_temp_free_i32(tmp);
                    break;
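                    /* The sequence above converts the four F32 lanes one at a
                     * time through cpu_F0s and packs each pair of F16 results,
                     * low half first, into the two destination words.
                     */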
                case NEON_2RM_VCVT_F32_F16:
                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
                        q || (rd & 1)) {
                        return 1;
                    }
                    tmp3 = tcg_temp_new_i32();
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    tcg_gen_ext16u_i32(tmp3, tmp);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_shri_i32(tmp3, tmp, 16);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_ext16u_i32(tmp3, tmp2);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                    tcg_gen_shri_i32(tmp3, tmp2, 16);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                default:
                elementwise:
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
                        if (neon_2rm_is_float_op(op)) {
                            tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rm, pass));
                            TCGV_UNUSED_I32(tmp);
                        } else {
                            tmp = neon_load_reg(rm, pass);
                        }
                        switch (op) {
                        case NEON_2RM_VREV32:
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                            case 1: gen_swap_half(tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VREV16:
                            gen_rev16(tmp);
                            break;
                        case NEON_2RM_VCLS:
                            switch (size) {
                            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCLZ:
                            switch (size) {
                            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                            case 2: gen_helper_clz(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCNT:
                            gen_helper_neon_cnt_u8(tmp, tmp);
                            break;
                        case NEON_2RM_VMVN:
                            tcg_gen_not_i32(tmp, tmp);
                            break;
                        case NEON_2RM_VQABS:
                            switch (size) {
                            case 0:
                                gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
                                break;
                            case 1:
                                gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
                                break;
                            case 2:
                                gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
                                break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VQNEG:
                            switch (size) {
                            case 0:
                                gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
                                break;
                            case 1:
                                gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
                                break;
                            case 2:
                                gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
                                break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            tcg_temp_free_i32(tmp2);
                            if (op == NEON_2RM_VCLE0) {
                                tcg_gen_not_i32(tmp, tmp);
                            }
                            break;
                        case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            tcg_temp_free_i32(tmp2);
                            if (op == NEON_2RM_VCLT0) {
                                tcg_gen_not_i32(tmp, tmp);
                            }
                            break;
                        case NEON_2RM_VCEQ0:
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            tcg_temp_free_i32(tmp2);
                            break;
                        case NEON_2RM_VABS:
                            switch (size) {
                            case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                            case 2: tcg_gen_abs_i32(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VNEG:
                            tmp2 = tcg_const_i32(0);
                            gen_neon_rsb(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case NEON_2RM_VCGT0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCGE0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCEQ0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCLE0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCLT0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VABS_F:
                            gen_vfp_abs(0);
                            break;
                        case NEON_2RM_VNEG_F:
                            gen_vfp_neg(0);
                            break;
                        case NEON_2RM_VSWP:
                            tmp2 = neon_load_reg(rd, pass);
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case NEON_2RM_VTRN:
                            tmp2 = neon_load_reg(rd, pass);
                            switch (size) {
                            case 0: gen_neon_trn_u8(tmp, tmp2); break;
                            case 1: gen_neon_trn_u16(tmp, tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case NEON_2RM_VRECPE:
                            gen_helper_recpe_u32(tmp, tmp, cpu_env);
                            break;
                        case NEON_2RM_VRSQRTE:
                            gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                            break;
                        case NEON_2RM_VRECPE_F:
                            gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case NEON_2RM_VRSQRTE_F:
                            gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
                            gen_vfp_sito(0, 1);
                            break;
                        case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
                            gen_vfp_uito(0, 1);
                            break;
                        case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
                            gen_vfp_tosiz(0, 1);
                            break;
                        case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
                            gen_vfp_touiz(0, 1);
                            break;
                        default:
                            /* Reserved op values were caught by the
                             * neon_2rm_sizes[] check earlier.
                             */
                            abort();
                        }
                        if (neon_2rm_is_float_op(op)) {
                            tcg_gen_st_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rd, pass));
                        } else {
                            neon_store_reg(rd, pass, tmp);
                        }
                    }
                    break;
                }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                int n = ((insn >> 8) & 3) + 1;
                if ((rn + n) > 32) {
                    /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
                     * helper function running off the end of the register file.
                     */
                    return 1;
                }
                n <<= 3;
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 0);
                } else {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp2 = neon_load_reg(rm, 0);
                tmp4 = tcg_const_i32(rn);
                tmp5 = tcg_const_i32(n);
                gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
                tcg_temp_free_i32(tmp);
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 1);
                } else {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp3 = neon_load_reg(rm, 1);
                gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
                tcg_temp_free_i32(tmp5);
                tcg_temp_free_i32(tmp4);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp3);
                tcg_temp_free_i32(tmp);
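                /* The lookup runs 32 bits at a time through the neon_tbl
                 * helper; the tmp operand supplies the fallback value for
                 * out-of-range indices (the old destination for VTBX, zero
                 * for VTBL).
                 */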
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                    return 1;
                }
                if (insn & (1 << 19)) {
                    tmp = neon_load_reg(rm, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                }
                if (insn & (1 << 16)) {
                    gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
                } else if (insn & (1 << 17)) {
                    if ((insn >> 18) & 1)
                        gen_neon_dup_high16(tmp);
                    else
                        gen_neon_dup_low16(tmp);
                }
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp2, tmp);
                    neon_store_reg(rd, pass, tmp2);
                }
                tcg_temp_free_i32(tmp);
            } else {
                return 1;
            }
        }
    }
    return 0;
}
static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum))) {
        return 1;
    }
    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn(env, s, insn);
    default:
        break;
    }
    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;
    ri = get_arm_cp_reginfo(cpu,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(env, ri, isread)) {
            return 1;
        }
        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_set_pc_im(s->pc);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
                gen_lookup_tb(s);
            }
        }
        return 0;
    }

    return 1;
}
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* Load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
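/* Setting cpu_exclusive_addr to -1 clears the local exclusive monitor:
 * -1 can never match a valid aligned monitored address, so any following
 * store-exclusive will fail.
 */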
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    tcg_gen_qemu_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    tcg_gen_qemu_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    if (writeback) {
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
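/* Note that SRS stores the current mode's LR and SPSR relative to the
 * banked SP of the mode named in the instruction, which is why the
 * banked-r13 helpers are used both to fetch the base and to write it back.
 */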
static void disas_arm_insn(CPUARMState *env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->pc += 4;

    /* M variants do not implement ARM mode.  */
    if (IS_M(env))
        goto illegal_op;
    cond = insn >> 28;
    if (cond == 0xf) {
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if (((insn & 0x0f30f000) == 0x0510f000) ||
            ((insn & 0x0f30f010) == 0x0710f000)) {
            if ((insn & (1 << 22)) == 0) {
                /* PLDW; v7MP */
                if (!arm_feature(env, ARM_FEATURE_V7MP)) {
                    goto illegal_op;
                }
            }
            /* Otherwise PLD; v5TE+ */
            ARCH(5TE);
            return;
        }
        if (((insn & 0x0f70f000) == 0x0450f000) ||
            ((insn & 0x0f70f010) == 0x0650f000)) {
            ARCH(7);
            return; /* PLI; V7 */
        }
        if (((insn & 0x0f700000) == 0x04100000) ||
            ((insn & 0x0f700010) == 0x06100000)) {
            if (!arm_feature(env, ARM_FEATURE_V7MP)) {
                goto illegal_op;
            }
            return; /* v7MP: Unallocated memory hint: must NOP */
        }
        if ((insn & 0x0ffffdff) == 0x01010000) {
            ARCH(6);
            /* setend */
            if (((insn >> 9) & 1) != s->bswap_code) {
                /* Dynamic endianness switching not implemented. */
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
            case 1: /* clrex */
                ARCH(6K);
                gen_clrex(s);
                return;
            case 4: /* dsb */
            case 5: /* dmb */
            case 6: /* isb */
                ARCH(7);
                /* We don't emulate caches so these are a no-op.  */
                return;
            default:
                goto illegal_op;
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            /* srs */
            if (IS_USER(s)) {
                goto illegal_op;
            }
            ARCH(6);
            gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
            return;
        } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
            /* rfe */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_ld32u(tmp, addr, 0);
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_qemu_ld32u(tmp2, addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                tcg_temp_free_i32(addr);
            }
            gen_rfe(s, tmp, tmp2);
            return;
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            int32_t offset;

            val = (uint32_t)s->pc;
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
            val += 4;
            /* protected by ARCH(5); above, near the start of uncond block */
            gen_bx_im(s, val);
            return;
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (env->cp15.c15_cpar & (1 << 1))
                    if (!disas_iwmmxt_insn(env, s, insn))
                        return;
            }
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
            /* Coprocessor double register transfer.  */
            ARCH(5TE);
        } else if ((insn & 0x0f000010) == 0x0e000010) {
            /* Additional coprocessor register transfer.  */
        } else if ((insn & 0x0ff10020) == 0x01000000) {
            uint32_t mask;
            uint32_t val;
            /* cps (privileged) */
            if (IS_USER(s))
                return;
            mask = val = 0;
            if (insn & (1 << 19)) {
                if (insn & (1 << 8))
                    mask |= CPSR_A;
                if (insn & (1 << 7))
                    mask |= CPSR_I;
                if (insn & (1 << 6))
                    mask |= CPSR_F;
                if (insn & (1 << 18))
                    val |= mask;
            }
            if (insn & (1 << 17)) {
                mask |= CPSR_M;
                val |= (insn & 0x1f);
            }
            if (mask) {
                gen_set_psr_im(s, mask, 0, val);
            }
            return;
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           the next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
    }
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            ARCH(6T2);
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
            } else {
                /* MOVT */
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            }
            store_reg(s, rd, tmp);
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                val = insn & 0xff;
                shift = ((insn >> 8) & 0xf) * 2;
                if (shift)
                    val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i),
                                   i, val))
                    goto illegal_op;
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        rm = insn & 0xf;
        switch (sh) {
        case 0x0: /* move program status register */
            if (op1 & 1) {
                /* PSR = reg */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i),
                                i, tmp))
                    goto illegal_op;
            } else {
                /* reg = PSR */
                rd = (insn >> 12) & 0xf;
                if (op1 & 2) {
                    if (IS_USER(s))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                } else {
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp, cpu_env);
                }
                store_reg(s, rd, tmp);
            }
            break;
        case 0x1:
            if (op1 == 1) {
                /* branch/exchange thumb (bx).  */
                ARCH(4T);
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                /* clz */
                ARCH(5);
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                gen_helper_clz(tmp, tmp);
                store_reg(s, rd, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x2:
            if (op1 == 1) {
                ARCH(5J); /* bxj */
                /* Trivial implementation equivalent to bx.  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x3:
            if (op1 != 1)
                goto illegal_op;
            ARCH(5);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x5: /* saturating add/subtract */
            ARCH(5TE);
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, cpu_env, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 7:
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
            if (op1 != 1) {
                goto illegal_op;
            }
            /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 4, EXCP_BKPT);
            break;
        case 0x8: /* signed multiply */
        case 0xa:
        case 0xc:
        case 0xe:
            ARCH(5TE);
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                store_reg(s, rd, tmp);
= load_reg(s
, rm
);
6994 tmp2
= load_reg(s
, rs
);
6995 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6996 tcg_temp_free_i32(tmp2
);
6998 tmp64
= tcg_temp_new_i64();
6999 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7000 tcg_temp_free_i32(tmp
);
7001 gen_addq(s
, tmp64
, rn
, rd
);
7002 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7003 tcg_temp_free_i64(tmp64
);
7006 tmp2
= load_reg(s
, rn
);
7007 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7008 tcg_temp_free_i32(tmp2
);
7010 store_reg(s
, rd
, tmp
);
7017 } else if (((insn
& 0x0e000000) == 0 &&
7018 (insn
& 0x00000090) != 0x90) ||
7019 ((insn
& 0x0e000000) == (1 << 25))) {
7020 int set_cc
, logic_cc
, shiftop
;
7022 op1
= (insn
>> 21) & 0xf;
7023 set_cc
= (insn
>> 20) & 1;
7024 logic_cc
= table_logic_cc
[op1
] & set_cc
;
7026 /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            /* register */
            rm = (insn) & 0xf;
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        } else {
            TCGV_UNUSED_I32(tmp);
        }
        rd = (insn >> 12) & 0xf;
        switch (op1) {
        case 0x00:
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x01:
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x02:
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_sub_CC(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc) {
                    gen_sub_CC(tmp, tmp, tmp2);
                } else {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                }
                store_reg_bx(env, s, rd, tmp);
            }
            break;
        case 0x03:
            if (set_cc) {
                gen_sub_CC(tmp, tmp2, tmp);
            } else {
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x04:
            if (set_cc) {
                gen_add_CC(tmp, tmp, tmp2);
            } else {
                tcg_gen_add_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x05:
            if (set_cc) {
                gen_adc_CC(tmp, tmp, tmp2);
            } else {
                gen_add_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x06:
            if (set_cc) {
                gen_sbc_CC(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x07:
            if (set_cc) {
                gen_sbc_CC(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x08:
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x09:
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0a:
            if (set_cc) {
                gen_sub_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0b:
            if (set_cc) {
                gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0c:
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x0d:
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_exception_return(s, tmp2);
            } else {
                if (logic_cc) {
                    gen_logic_CC(tmp2);
                }
                store_reg_bx(env, s, rd, tmp2);
            }
            break;
        case 0x0e:
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        default:
        case 0x0f:
            tcg_gen_not_i32(tmp2, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(env, s, rd, tmp2);
            break;
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        }
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch (op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
7216 tmp
= load_reg(s
, rs
);
7217 tmp2
= load_reg(s
, rm
);
7218 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7219 tcg_temp_free_i32(tmp2
);
7220 if (insn
& (1 << 22)) {
7221 /* Subtract (mls) */
7223 tmp2
= load_reg(s
, rn
);
7224 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7225 tcg_temp_free_i32(tmp2
);
7226 } else if (insn
& (1 << 21)) {
7228 tmp2
= load_reg(s
, rn
);
7229 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7230 tcg_temp_free_i32(tmp2
);
7232 if (insn
& (1 << 20))
7234 store_reg(s
, rd
, tmp
);
7237 /* 64 bit mul double accumulate (UMAAL) */
7239 tmp
= load_reg(s
, rs
);
7240 tmp2
= load_reg(s
, rm
);
7241 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7242 gen_addq_lo(s
, tmp64
, rn
);
7243 gen_addq_lo(s
, tmp64
, rd
);
7244 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7245 tcg_temp_free_i64(tmp64
);
                    case 8: case 9: case 10: case 11:
                    case 12: case 13: case 14: case 15:
                        /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
                        } else {
                            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
                        }
                        if (insn & (1 << 21)) { /* mult accumulate */
                            TCGv_i32 al = load_reg(s, rn);
                            TCGv_i32 ah = load_reg(s, rd);
                            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
                            tcg_temp_free_i32(al);
                            tcg_temp_free_i32(ah);
                        }
                        if (insn & (1 << 20)) {
                            gen_logicq_cc(tmp, tmp2);
                        }
                        store_reg(s, rn, tmp);
                        store_reg(s, rd, tmp2);
                        break;
                    default:
                        goto illegal_op;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        int op2 = (insn >> 8) & 3;
                        op1 = (insn >> 21) & 0x3;

                        switch (op2) {
                        case 0: /* lda/stl */
                            if (op1 == 1) {
                                goto illegal_op;
                            }
                            ARCH(8);
                            break;
                        case 1: /* reserved */
                            goto illegal_op;
                        case 2: /* ldaex/stlex */
                            ARCH(8);
                            break;
                        case 3: /* ldrex/strex */
                            if (op1) {
                                ARCH(6K);
                            } else {
                                ARCH(6);
                            }
                            break;
                        }
                        addr = tcg_temp_local_new_i32();
                        load_reg_var(s, addr, rn);

                        /* Since the emulation does not have barriers,
                           the acquire/release semantics need no special
                           treatment */
                        if (op2 == 0) {
                            if (insn & (1 << 20)) {
                                tmp = tcg_temp_new_i32();
                                switch (op1) {
                                case 0: /* lda */
                                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                                    break;
                                case 2: /* ldab */
                                    tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                                    break;
                                case 3: /* ldah */
                                    tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                                    break;
                                default:
                                    abort();
                                }
                                store_reg(s, rd, tmp);
                            } else {
                                rm = insn & 0xf;
                                tmp = load_reg(s, rm);
                                switch (op1) {
                                case 0: /* stl */
                                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                                    break;
                                case 2: /* stlb */
                                    tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                                    break;
                                case 3: /* stlh */
                                    tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                                    break;
                                default:
                                    abort();
                                }
                                tcg_temp_free_i32(tmp);
                            }
                        } else if (insn & (1 << 20)) {
                            switch (op1) {
                            case 0: /* ldrex */
                                gen_load_exclusive(s, rd, 15, addr, 2);
                                break;
                            case 1: /* ldrexd */
                                gen_load_exclusive(s, rd, rd + 1, addr, 3);
                                break;
                            case 2: /* ldrexb */
                                gen_load_exclusive(s, rd, 15, addr, 0);
                                break;
                            case 3: /* ldrexh */
                                gen_load_exclusive(s, rd, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        } else {
                            rm = insn & 0xf;
                            switch (op1) {
                            case 0: /* strex */
                                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                                break;
                            case 1: /* strexd */
                                gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                                break;
                            case 2: /* strexb */
                                gen_store_exclusive(s, rd, rm, 15, addr, 0);
                                break;
                            case 3: /* strexh */
                                gen_store_exclusive(s, rd, rm, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        }
                        tcg_temp_free_i32(addr);
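                        /* For the non-doubleword forms above, rt2 is passed
                         * as 15 and is simply ignored by the exclusive
                         * helpers, since they only use it when size == 3.
                         */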
                    } else {
                        /* SWP instruction */
                        rm = (insn) & 0xf;

                        /* ??? This is not really atomic.  However we know
                           we never have multiple CPUs running in parallel,
                           so it is good enough.  */
                        addr = load_reg(s, rn);
                        tmp = load_reg(s, rm);
                        tmp2 = tcg_temp_new_i32();
                        if (insn & (1 << 22)) {
                            tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
                            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                        } else {
                            tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(addr);
                        store_reg(s, rd, tmp2);
                    }
                }
            } else {
                int address_offset;
                int load;
                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                addr = load_reg(s, rn);
                if (insn & (1 << 24))
                    gen_add_datah_offset(s, insn, 0, addr);
                address_offset = 0;
                if (insn & (1 << 20)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    switch (sh) {
                    case 1:
                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
                        break;
                    default:
                    case 3:
                        tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
                        break;
                    }
                    load = 1;
                } else if (sh & 2) {
                    ARCH(5TE);
                    /* doubleword */
                    if (sh & 1) {
                        /* store */
                        tmp = load_reg(s, rd);
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                        load = 0;
                    } else {
                        /* load */
                        tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                        rd++;
                        load = 1;
                    }
                    address_offset = -4;
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                    load = 0;
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
                if (!(insn & (1 << 24))) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (insn & (1 << 21)) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp);  break;
                        case 2: gen_sxtb(tmp);    break;
                        case 3: gen_sxth(tmp);    break;
                        case 4: gen_uxtb16(tmp);  break;
                        case 6: gen_uxtb(tmp);    break;
                        case 7: gen_uxth(tmp);    break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    switch ((insn >> 20) & 0x7) {
                    case 5:
                        if (((insn >> 6) ^ (insn >> 7)) & 1) {
                            /* op2 not 00x or 11x : UNDEF */
                            goto illegal_op;
                        }
                        /* Signed multiply most significant [accumulate].
                           (SMMUL, SMMLA, SMMLS) */
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);

                        if (rd != 15) {
                            tmp = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tmp64 = gen_subq_msw(tmp64, tmp);
                            } else {
                                tmp64 = gen_addq_msw(tmp64, tmp);
                            }
                        }
                        if (insn & (1 << 5)) {
                            /* Add rounding constant */
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        }
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        store_reg(s, rn, tmp);
                        break;
                    case 0:
                    case 4:
                        /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
                        if (insn & (1 << 7)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        if (insn & (1 << 6)) {
                            /* This subtraction cannot overflow. */
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                        } else {
                            /* This addition cannot overflow 32 bits;
                             * however it may overflow considered as a signed
                             * operation, in which case we must set the Q flag.
                             */
                            gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            tmp64 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                        break;
                    case 1:
                    case 3:
                        /* SDIV, UDIV */
                        if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
                            goto illegal_op;
                        }
                        if (((insn >> 5) & 7) || (rd != 15)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 21)) {
                            gen_helper_udiv(tmp, tmp, tmp2);
                        } else {
                            gen_helper_sdiv(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                gen_ubfx(tmp, shift, (1u << i) - 1);
                            } else {
                                gen_sbfx(tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
            } else {
                /* Check for undefined extension instructions
                 * per the ARM Bible IE:
                 * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
                 */
                sh = (0xf << 20) | (0xf << 4);
                if (op1 == 0x7 && ((insn & sh) == sh)) {
                    goto illegal_op;
                }
            }
        do_ldst:
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    tcg_gen_qemu_ld8u(tmp, tmp2, i);
                } else {
                    tcg_gen_qemu_ld32u(tmp, tmp2, i);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22)) {
                    tcg_gen_qemu_st8(tmp, tmp2, i);
                } else {
                    tcg_gen_qemu_st32(tmp, tmp2, i);
                }
                tcg_temp_free_i32(tmp);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                tcg_temp_free_i32(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                store_reg_from_load(env, s, rd, tmp);
            }
            break;
, n
, user
, loaded_base
;
7820 TCGv_i32 loaded_var
;
7821 /* load/store multiple words */
7822 /* XXX: store correct base if write back */
7824 if (insn
& (1 << 22)) {
7826 goto illegal_op
; /* only usable in supervisor mode */
7828 if ((insn
& (1 << 15)) == 0)
7831 rn
= (insn
>> 16) & 0xf;
7832 addr
= load_reg(s
, rn
);
7834 /* compute total size */
7836 TCGV_UNUSED_I32(loaded_var
);
7839 if (insn
& (1 << i
))
7842 /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (insn & (1 << 20)) {
                            /* load */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                            if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else {
                                store_reg_from_load(env, s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
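                /* A load that targets the base register is kept in
                 * loaded_var and only written back after the loop, so a
                 * base-register load and base writeback cannot clobber
                 * each other.
                 */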
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if ((insn & (1 << 22)) && !user) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    gen_set_cpsr(tmp, 0xffffffff);
                    tcg_temp_free_i32(tmp);
                    s->is_jmp = DISAS_UPDATE;
                }
            }
            break;
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = (((int32_t)insn << 8) >> 8);
                val += (offset << 2) + 4;
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            /* Coprocessor.  */
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF);
            break;
        }
    }
}
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
7996 tcg_gen_and_i32(t0
, t0
, t1
);
8000 tcg_gen_andc_i32(t0
, t0
, t1
);
8004 tcg_gen_or_i32(t0
, t0
, t1
);
8008 tcg_gen_orc_i32(t0
, t0
, t1
);
8012 tcg_gen_xor_i32(t0
, t0
, t1
);
8017 gen_add_CC(t0
, t0
, t1
);
8019 tcg_gen_add_i32(t0
, t0
, t1
);
8023 gen_adc_CC(t0
, t0
, t1
);
8029 gen_sbc_CC(t0
, t0
, t1
);
8031 gen_sub_carry(t0
, t0
, t1
);
8036 gen_sub_CC(t0
, t0
, t1
);
8038 tcg_gen_sub_i32(t0
, t0
, t1
);
8042 gen_sub_CC(t0
, t1
, t0
);
8044 tcg_gen_sub_i32(t0
, t1
, t0
);
8046 default: /* 5, 6, 7, 9, 12, 15. */
8052 gen_set_CF_bit31(t1
);
8057 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8059 static int disas_thumb2_insn(CPUARMState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
8061 uint32_t insn
, imm
, shift
, offset
;
8062 uint32_t rd
, rn
, rm
, rs
;
8073 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
8074 || arm_feature (env
, ARM_FEATURE_M
))) {
8075 /* Thumb-1 cores may need to treat bl and blx as a pair of
8076 16-bit instructions to get correct prefetch abort behavior. */
8078 if ((insn
& (1 << 12)) == 0) {
8080 /* Second half of blx. */
8081 offset
= ((insn
& 0x7ff) << 1);
8082 tmp
= load_reg(s
, 14);
8083 tcg_gen_addi_i32(tmp
, tmp
, offset
);
8084 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
8086 tmp2
= tcg_temp_new_i32();
8087 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
8088 store_reg(s
, 14, tmp2
);
8092 if (insn
& (1 << 11)) {
8093 /* Second half of bl. */
8094 offset
= ((insn
& 0x7ff) << 1) | 1;
8095 tmp
= load_reg(s
, 14);
8096 tcg_gen_addi_i32(tmp
, tmp
, offset
);
8098 tmp2
= tcg_temp_new_i32();
8099 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
8100 store_reg(s
, 14, tmp2
);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
        /* Fall through to 32-bit decode.  */

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    insn |= (uint32_t)insn_hw1 << 16;
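    /* From here on, insn holds hw1 in bits [31:16] and hw2 in bits [15:0];
     * e.g. the halfword pair 0xf001, 0xb800 is decoded below as
     * insn == 0xf001b800. */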
    if ((insn & 0xf800e800) != 0xf000e800) {
    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                    addr = load_reg(s, rn);
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                    tmp = load_reg(s, rs);
                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                    tcg_temp_free_i32(addr);
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                    addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
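                /* Illustrative example: for a "tbb [rn, rm]" table branch
                 * with rm = 3, the byte loaded from rn + 3 holds half the
                 * forward branch distance; if that byte is 0x10, the shift
                 * and add above produce pc + (0x10 << 1) = pc + 0x20. */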
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                    /* Load/store exclusive byte/halfword/doubleword */
                    /* Load-acquire/store-release */
                    /* Load-acquire/store-release exclusive */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tmp = load_reg(s, rs);
                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                } else if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                tcg_temp_free_i32(addr);
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || IS_M(env)) {
                if (insn & (1 << 20)) {
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = tcg_temp_new_i32();
                    tcg_gen_qemu_ld32u(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_qemu_ld32u(tmp2, addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                            tcg_gen_addi_i32(addr, addr, -4);
                        store_reg(s, rn, addr);
                        tcg_temp_free_i32(addr);
                    gen_rfe(s, tmp, tmp2);
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                    int i, loaded_base = 0;
                    TCGv_i32 loaded_var;
                    /* Load/store multiple.  */
                    addr = load_reg(s, rn);
                    for (i = 0; i < 16; i++) {
                        if (insn & (1 << i))
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    TCGV_UNUSED_I32(loaded_var);
                    for (i = 0; i < 16; i++) {
                        if ((insn & (1 << i)) == 0)
                        if (insn & (1 << 20)) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                            } else if (i == rn) {
                                store_reg(s, i, tmp);
                            tmp = load_reg(s, i);
                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        store_reg(s, rn, loaded_var);
                    if (insn & (1 << 21)) {
                        /* Base register writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, -offset);
                        /* Fault if writeback register is in register list.  */
                        if (insn & (1 << rn))
                        store_reg(s, rn, addr);
                        tcg_temp_free_i32(addr);
        op = (insn >> 21) & 0xf;
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
                tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
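            /* Illustrative example with hypothetical operands: a pkhbt of
             * rn = 0x00001234 with rm = 0x00050000 and shift = 4 takes the
             * second path above and computes
             * (rn & 0xffff) | ((rm << 4) & 0xffff0000) = 0x00501234. */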
            /* Data processing register constant shift.  */
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
                tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
            tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp);
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            store_reg_bx(env, s, rd, tmp);
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp = load_reg(s, rn);
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                case 0x09: /* rev16 */
                case 0x0b: /* revsh */
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
            store_reg(s, rd, tmp);
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rs);
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
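                    /* Worked example: if each 16 x 16 product is
                     * 0x8000 * 0x8000 = 0x40000000, the sum
                     * 0x40000000 + 0x40000000 = 0x80000000 is negative
                     * when viewed as a signed value, so add_setq records
                     * the overflow in the Q flag. */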
                tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
            case 3: /* 32 * 16 -> 32msb */
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                        tmp64 = gen_subq_msw(tmp64, tmp);
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
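                /* The 0x80000000 bias rounds to nearest instead of
                 * truncating when the rounding variants (e.g. SMMULR) are
                 * encoded: a 64-bit product of 0x180000000 truncates to 1
                 * but yields (0x180000000 + 0x80000000) >> 32 = 2. */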
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
                    gen_helper_udiv(tmp, tmp, tmp2);
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
    case 6: case 7: case 14: case 15:
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
            if (insn & (1 << 28))
            if (disas_coproc_insn(env, s, insn))
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
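                /* Illustrative worked example: insn = 0xf001b800, i.e.
                 * hw1 = 0xf001 (S=0, imm10=1) and hw2 = 0xb800 (J1=J2=1,
                 * imm11=0).  The shifted sign extension gives
                 * offset = 0x1000, imm11 contributes nothing, and with
                 * J1=J2=1 neither XOR flips a bit, so the branch distance
                 * is +0x1000. */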
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                if (insn & (1 << 12)) {
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
            } else if (((insn >> 23) & 7) == 7) {
                if (insn & (1 << 13))
                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                op = (insn >> 20) & 7;
                case 0: /* msr cpsr.  */
                        tmp = load_reg(s, rn);
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(tmp);
                case 1: /* msr spsr.  */
                    tmp = load_reg(s, rn);
                    msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                case 2: /* cps, nop-hint.  */
                    if (((insn >> 8) & 7) == 0) {
                        gen_nop_hint(s, insn & 0xff);
                    /* Implemented as NOP in user mode.  */
                    if (insn & (1 << 10)) {
                        if (insn & (1 << 7))
                        if (insn & (1 << 6))
                        if (insn & (1 << 5))
                        if (insn & (1 << 9))
                            imm = CPSR_A | CPSR_I | CPSR_F;
                    if (insn & (1 << 8)) {
                        imm |= (insn & 0x1f);
                    gen_set_psr_im(s, offset, 0, imm);
                case 3: /* Special control operations.  */
                    op = (insn >> 4) & 0xf;
                        /* These execute as NOPs.  */
                    /* Trivial implementation equivalent to bx.  */
                    tmp = load_reg(s, rn);
                case 5: /* Exception return.  */
                    if (rn != 14 || rd != 15) {
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                case 6: /* mrs cpsr.  */
                    tmp = tcg_temp_new_i32();
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_mrs(tmp, cpu_env, addr);
                        tcg_temp_free_i32(addr);
                        gen_helper_cpsr_read(tmp, cpu_env);
                    store_reg(s, rd, tmp);
                case 7: /* mrs spsr.  */
                    /* Not accessible in user mode.  */
                    if (IS_USER(s) || IS_M(env))
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;
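                /* Illustrative worked example: insn = 0xf0128400, i.e.
                 * hw1 = 0xf012 (S=0, cond=EQ, imm6=0x12) and hw2 = 0x8400
                 * (J1=J2=0, imm11=0x400):
                 * offset = (0x400 << 1) | (0x120000 >> 4) = 0x12800,
                 * while S, J1 and J2 contribute nothing, giving a forward
                 * beq of 0x12800 bytes relative to the Thumb pc. */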
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                        tmp = load_reg(s, rn);
                    case 2: /* Signed bitfield extract.  */
                        if (shift + imm > 32)
                        gen_sbfx(tmp, shift, imm);
                    case 6: /* Unsigned bitfield extract.  */
                        if (shift + imm > 32)
                        gen_ubfx(tmp, shift, (1u << imm) - 1);
                    case 3: /* Bitfield insert/clear.  */
                        imm = imm + 1 - shift;
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                    default: /* Saturate.  */
                            tcg_gen_sari_i32(tmp, tmp, shift);
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        tmp2 = tcg_const_i32(imm);
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        /* Add/sub 12-bit immediate.  */
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                                tcg_gen_addi_i32(tmp, tmp, imm);
                    store_reg(s, rd, tmp);
                    int shifter_out = 0;
                    /* modified 12-bit immediate.  */
                    shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                    imm = (insn & 0xff);
                        /* Nothing to do.  */
                    case 1: /* 00XY00XY */
                    case 2: /* XY00XY00 */
                    case 3: /* XYXYXYXY */
                    default: /* Rotated constant.  */
                        shift = (shift << 1) | (imm >> 7);
                        imm = imm << (32 - shift);
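                    /* Illustrative worked example: i:imm3 = 0b1010 with
                     * imm8 = 0xbf gives shift = (0b1010 << 1) | 1 = 21 and
                     * imm = 0xbf << (32 - 21) = 0x0005f800, i.e. the 8-bit
                     * pattern rotated right by 21 within the 32-bit word. */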
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                    store_reg(s, rd, tmp);
                    tcg_temp_free_i32(tmp);
    case 12: /* Load/store single data item.  */
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (!(insn & (1 << 20))) {
        /* Byte or halfword load space with dest == r15 : memory hints.
         * Catch them early so we don't emit pointless addressing code.
         * This space is a mix of:
         *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
         *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
         *     cores)
         *  unallocated hints, which must be treated as NOPs
         *  UNPREDICTABLE space, which we NOP or UNDEF depending on
         *     which is easiest for the decoding logic
         *  Some space which must UNDEF
         */
        int op1 = (insn >> 23) & 3;
        int op2 = (insn >> 6) & 0x3f;
            /* UNPREDICTABLE, unallocated hint or
             * PLD/PLDW/PLI (literal)
             */
            return 0; /* PLD/PLDW/PLI or unallocated hint */
        if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
            return 0; /* PLD/PLDW/PLI or unallocated hint */
        /* UNDEF space, or an UNPREDICTABLE */
        addr = tcg_temp_new_i32();
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                tcg_gen_addi_i32(addr, addr, imm);
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                        tcg_temp_free_i32(addr);
                    tmp = load_reg(s, rm);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                case 0x9: /* Post-decrement.  */
                case 0xb: /* Post-increment.  */
                case 0xd: /* Pre-decrement.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    tcg_temp_free_i32(addr);
        if (insn & (1 << 20)) {
            tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld8u(tmp, addr, user);
                tcg_gen_qemu_ld8s(tmp, addr, user);
                tcg_gen_qemu_ld16u(tmp, addr, user);
                tcg_gen_qemu_ld16s(tmp, addr, user);
                tcg_gen_qemu_ld32u(tmp, addr, user);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                store_reg(s, rs, tmp);
            tmp = load_reg(s, rs);
                tcg_gen_qemu_st8(tmp, addr, user);
                tcg_gen_qemu_st16(tmp, addr, user);
                tcg_gen_qemu_st32(tmp, addr, user);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
        tcg_gen_addi_i32(addr, addr, imm);
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
    insn = arm_lduw_code(env, s->pc, s->bswap_code);

    switch (insn >> 12) {
        op = (insn >> 11) & 3;
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    gen_sub_CC(tmp, tmp, tmp2);
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    gen_add_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                /* already thumb, no need to check */
        /* data processing register */
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
            TCGV_UNUSED_I32(tmp);
        tmp2 = load_reg(s, rm);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
            if (s->condexec_mask) {
                gen_adc_CC(tmp, tmp, tmp2);
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
                gen_sbc_CC(tmp, tmp, tmp2);
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
                gen_sub_CC(tmp, tmp, tmp2);
            gen_sub_CC(tmp, tmp, tmp2);
            gen_add_CC(tmp, tmp, tmp2);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
            store_reg(s, rm, tmp2);
            tcg_temp_free_i32(tmp);
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        /* load/store register offset.  */
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
            tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
            tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
            tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store word immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store byte immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store halfword immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            tmp = load_reg(s, 13);
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);

        op = (insn >> 8) & 0xf;
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
        case 2: /* sign/zero extend.  */
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            store_reg(s, rd, tmp);
        case 4: case 5: case 0xc: case 0xd:
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                        store_reg(s, i, tmp);
                        tmp = load_reg(s, i);
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                    tmp = load_reg(s, 14);
                    tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                tcg_gen_addi_i32(addr, addr, 4);
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
        case 1: case 3: case 9: case 11: /* czb */
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
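            /* Illustrative worked example: with bit 9 set (i = 1) and
             * imm5 = 8 in bits [7:3], offset = (0x40 >> 2) | (0x200 >> 3)
             * = 0x10 | 0x40 = 0x50, a forward skip of 80 bytes from the
             * Thumb-visible pc (instruction address + 4). */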
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
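            /* Illustrative worked example: "ite eq" is 0xbf0c, so
             * condexec_cond starts as 0 (EQ with its low bit masked off)
             * and condexec_mask as 0b01100.  The per-instruction advance in
             * gen_intermediate_code_internal then shifts the mask left and
             * takes each condition's low bit from mask bit 4: 0 (EQ) for
             * the first instruction, 1 (NE) for the second, after which
             * the mask is empty and the IT block ends. */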
        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            store_reg(s, rd, tmp);
            switch ((insn >> 5) & 7) {
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    addr = tcg_const_i32(19);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                tcg_temp_free_i32(tmp);
                if (insn & (1 << 4)) {
                    shift = CPSR_A | CPSR_I | CPSR_F;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);

            /* load/store multiple */
            TCGv_i32 loaded_var;
            TCGV_UNUSED_I32(loaded_var);
            rn = (insn >> 8) & 0x7;
            addr = load_reg(s, rn);
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                        store_reg(s, i, tmp);
                        tmp = load_reg(s, i);
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                    /* advance to the next address */
                    tcg_gen_addi_i32(addr, addr, 4);
            if ((insn & (1 << rn)) == 0) {
                /* base reg not in list: base register writeback */
                store_reg(s, rn, addr);
                /* base reg in list: if load, complete it now */
                if (insn & (1 << 11)) {
                    store_reg(s, rn, loaded_var);
                tcg_temp_free_i32(addr);

            /* conditional branch or swi */
            cond = (insn >> 8) & 0xf;
                gen_set_pc_im(s->pc);
                s->is_jmp = DISAS_SWI;
            /* generate a conditional jump to next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            /* jump to the offset */
            val = (uint32_t)s->pc + 2;
            offset = ((int32_t)insn << 24) >> 24;

            if (insn & (1 << 11)) {
                if (disas_thumb2_insn(env, s, insn))
            /* unconditional branch */
            val = (uint32_t)s->pc;
            offset = ((int32_t)insn << 21) >> 21;
            val += (offset << 1) + 2;

            if (disas_thumb2_insn(env, s, insn))
    gen_exception_insn(s, 4, EXCP_UDEF);
    gen_exception_insn(s, 2, EXCP_UDEF);
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(ARMCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    dc->is_jmp = DISAS_NEXT;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
        max_insns = tb->cflags & CF_COUNT_MASK;
        max_insns = CF_COUNT_MASK;

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[].  restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    goto done_generating;
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
                tcg_ctx.gen_opc_instr_start[lj++] = 0;
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
            disas_arm_insn(env, dc);
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
                gen_exception(EXCP_DEBUG);
            gen_set_label(dc->condlabel);
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
            gen_goto_tb(dc, 1, dc->pc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
            gen_helper_wfi(cpu_env);
            gen_exception(EXCP_SWI);
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_goto_tb(dc, 1, dc->pc);

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
            cpu_fprintf(f, "\n");
            cpu_fprintf(f, " ");
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}