/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
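/* For example, a decode path that is only valid from ARMv6T2 on can start
   with ARCH(6T2); on earlier cores this jumps to the enclosing decoder's
   illegal_op label and the instruction is treated as undefined. */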
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
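/* Typical pattern: tmp = load_cpu_field(CF) reads env->CF into a fresh
   temporary, and store_cpu_field(tmp, CF) writes it back, consuming
   (freeing) the temporary. */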
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
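/* gen_smul_dual() below computes both signed 16x16->32 products needed by
   the dual-halfword multiplies: the low-half product is left in a and the
   high-half product in b, with both inputs clobbered. */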
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
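/* The and/xor/sub sequence above is the usual branch-free sign extension:
   once the field is masked to 'width' bits, XORing in the sign bit and then
   subtracting it replicates that bit through bits [31:width]. */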
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
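/* Masking out bit 15 of both addends keeps any carry from the low halfword
   from rippling into the high halfword; the saved XOR of the original
   bit-15 values then repairs bit 15 of each 16-bit sum. */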
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
        break;
    }
}
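/* Illustrative call: for an ARM "movs rd, rm, lsr #1" the decoder would use
   gen_arm_shift_im(var, 1, 1, 1), so shifter_out_im(var, 0) captures bit 0
   into CF before the single-bit right shift. */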
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
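/* Only op1 == 1 and op1 == 5 (the flag-setting signed/unsigned forms) pass
   the extra pointer argument: those helpers record per-lane results in
   env->GE for a later SEL. The saturating and halving variants carry no
   such state. */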
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
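/* Nonzero entries in the table below mark the data-processing ops whose
   S-suffixed forms set only N and Z from the result (the logical ops);
   for the arithmetic ops the full NZCV set is computed elsewhere. */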
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function.  */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
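/* The gen_ld and gen_st wrappers above fold the allocation (or release) of
   the TCG temporary into the guest memory access; 'index' is the MMU index
   for the access, typically IS_USER(s) at the call sites in this file. */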
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
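/* With dp == 0 the S registers are addressed as halves of the D register
   file: odd-numbered singles sit in the upper word of their containing
   double, which is why reg & 1 selects l.upper over l.lower. */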
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT (1 << 20)
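/* Bit 20 of a coprocessor access encoding is the direction bit: set for
   transfers out of the coprocessor (e.g. TMRRC/MRA below), clear for
   transfers into it. */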
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
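/* The IWMMXT_OP* macros stamp out one trivial wrapper per helper: the plain
   form is a pure wRn x wRn -> M0 operation, the _ENV forms also pass
   cpu_env for helpers that update saturation/flag state, and _SIZE expands
   to the b/w/l lane-size triple in one go. */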
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error
   occurred (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
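/* Worked example (illustrative): VFP_DREG_M uses bigbit == 0 and
 * smallbit == 5, so on a VFP3 core an insn with low nibble 0xa and bit 5
 * set decodes to reg = 0xa | 0x10 = 26, i.e. D26.  On pre-VFP3 cores a set
 * smallbit takes the UNDEF path, leaving only D0-D15 encodable.  The
 * single-precision forms interleave instead: VFP_SREG_M doubles the low
 * nibble and uses bit 5 as the LSB, so the same bits select S21.
 */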
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
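/* Illustration of the replication helpers: gen_neon_dup_u8(var, 8) on
 * var = 0x00003400 leaves 0x34343434; gen_neon_dup_low16 turns 0x00001234
 * into 0x12341234; gen_neon_dup_high16 turns 0x12340000 into 0x12341234.
 */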
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_MVFR))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        }
        break;
    case 0xa:
    case 0xb:
        /* data processing */
        /* The opcode is in bits 23, 21, 20 and 6.  */
        op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
        if (dp) {
            if (op == 15) {
                /* rn is opcode */
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
            } else {
                /* rn is register number */
                VFP_DREG_N(rn, insn);
            }

            if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                /* Integer or single precision destination.  */
                rd = VFP_SREG_D(insn);
            } else {
                VFP_DREG_D(rd, insn);
            }
            if (op == 15 &&
                (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                /* VCVT from int is always from S reg regardless of dp bit.
                 * VCVT with immediate frac_bits has same format as SREG_M
                 */
                rm = VFP_SREG_M(insn);
            } else {
                VFP_DREG_M(rm, insn);
            }
        } else {
            rn = VFP_SREG_N(insn);
            if (op == 15 && rn == 15) {
                /* Double precision destination.  */
                VFP_DREG_D(rd, insn);
            } else {
                rd = VFP_SREG_D(insn);
            }
            /* NB that we implicitly rely on the encoding for the frac_bits
             * in VCVT of fixed to float being the same as that of an SREG_M
             */
            rm = VFP_SREG_M(insn);
        }

        veclen = s->vec_len;
        if (op == 15 && rn > 3)
            veclen = 0;

        /* Shut up compiler warnings.  */
        delta_m = 0;
        delta_d = 0;
        bank_mask = 0;

        if (veclen > 0) {
            if (dp)
                bank_mask = 0xc;
            else
                bank_mask = 0x18;
            /* Figure out what type of vector operation this is.  */
            if ((rd & bank_mask) == 0) {
                /* scalar */
                veclen = 0;
            } else {
                if (dp)
                    delta_d = (s->vec_stride >> 1) + 1;
                else
                    delta_d = s->vec_stride + 1;

                if ((rm & bank_mask) == 0) {
                    /* mixed scalar/vector */
                    delta_m = 0;
                } else {
                    /* vector */
                    delta_m = delta_d;
                }
            }
        }

        /* Load the initial operands.  */
        if (op == 15) {
            switch (rn) {
            case 16:
            case 17:
                /* Integer source */
                gen_mov_F0_vreg(0, rm);
                break;
            case 8:
            case 9:
                /* Compare */
                gen_mov_F0_vreg(dp, rd);
                gen_mov_F1_vreg(dp, rm);
                break;
            case 10:
            case 11:
                /* Compare with zero */
                gen_mov_F0_vreg(dp, rd);
                gen_vfp_F1_ld0(dp);
                break;
            case 20:
            case 21:
            case 22:
            case 23:
            case 28:
            case 29:
            case 30:
            case 31:
                /* Source and destination the same.  */
                gen_mov_F0_vreg(dp, rd);
                break;
            case 4:
            case 5:
            case 6:
            case 7:
                /* VCVTB, VCVTT: only present with the halfprec extension,
                 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
                 */
                if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
                    return 1;
                }
                /* Otherwise fall through */
            default:
                /* One source operand.  */
                gen_mov_F0_vreg(dp, rm);
                break;
            }
        } else {
            /* Two source operands.  */
            gen_mov_F0_vreg(dp, rn);
            gen_mov_F1_vreg(dp, rm);
        }

        for (;;) {
            /* Perform the calculation.  */
            switch (op) {
            case 0: /* VMLA: fd + (fn * fm) */
                /* Note that order of inputs to the add matters for NaNs */
                gen_vfp_F1_mul(dp);
                gen_mov_F0_vreg(dp, rd);
                gen_vfp_add(dp);
                break;
            case 1: /* VMLS: fd + -(fn * fm) */
                gen_vfp_mul(dp);
                gen_vfp_F1_neg(dp);
                gen_mov_F0_vreg(dp, rd);
                gen_vfp_add(dp);
                break;
            case 2: /* VNMLS: -fd + (fn * fm) */
                /* Note that it isn't valid to replace (-A + B) with (B - A)
                 * or similar plausible looking simplifications
                 * because this will give wrong results for NaNs.
                 */
                gen_vfp_F1_mul(dp);
                gen_mov_F0_vreg(dp, rd);
                gen_vfp_neg(dp);
                gen_vfp_add(dp);
                break;
            case 3: /* VNMLA: -fd + -(fn * fm) */
                gen_vfp_mul(dp);
                gen_vfp_F1_neg(dp);
                gen_mov_F0_vreg(dp, rd);
                gen_vfp_neg(dp);
                gen_vfp_add(dp);
                break;
            case 4: /* mul: fn * fm */
                gen_vfp_mul(dp);
                break;
            case 5: /* nmul: -(fn * fm) */
                gen_vfp_mul(dp);
                gen_vfp_neg(dp);
                break;
            case 6: /* add: fn + fm */
                gen_vfp_add(dp);
                break;
            case 7: /* sub: fn - fm */
                gen_vfp_sub(dp);
                break;
            case 8: /* div: fn / fm */
                gen_vfp_div(dp);
                break;
            case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
            case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
            case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
            case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                /* These are fused multiply-add, and must be done as one
                 * floating point operation with no rounding between the
                 * multiplication and addition steps.
                 * NB that doing the negations here as separate steps is
                 * correct : an input NaN should come out with its sign bit
                 * flipped if it is a negated-input.
                 */
                if (!arm_feature(env, ARM_FEATURE_VFP4)) {
                    return 1;
                }
                if (dp) {
                    TCGv_ptr fpst;
                    TCGv_i64 frd;
                    if (op & 1) {
                        /* VFNMS, VFMS */
                        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                    }
                    frd = tcg_temp_new_i64();
                    tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                    if (op & 2) {
                        /* VFNMA, VFNMS */
                        gen_helper_vfp_negd(frd, frd);
                    }
                    fpst = get_fpstatus_ptr(0);
                    gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                           cpu_F1d, frd, fpst);
                    tcg_temp_free_ptr(fpst);
                    tcg_temp_free_i64(frd);
                } else {
                    TCGv_ptr fpst;
                    TCGv frd;
                    if (op & 1) {
                        /* VFNMS, VFMS */
                        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                    }
                    frd = tcg_temp_new_i32();
                    tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                    if (op & 2) {
                        /* VFNMA, VFNMS */
                        gen_helper_vfp_negs(frd, frd);
                    }
                    fpst = get_fpstatus_ptr(0);
                    gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                           cpu_F1s, frd, fpst);
                    tcg_temp_free_ptr(fpst);
                    tcg_temp_free_i32(frd);
                }
                break;
            case 14: /* fconst */
                if (!arm_feature(env, ARM_FEATURE_VFP3))
                    return 1;

                n = (insn << 12) & 0x80000000;
                i = ((insn >> 12) & 0x70) | (insn & 0xf);
                if (dp) {
                    if (i & 0x40)
                        i |= 0x3f80;
                    else
                        i |= 0x4000;
                    n |= i << 16;
                    tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                } else {
                    if (i & 0x40)
                        i |= 0x780;
                    else
                        i |= 0x800;
                    n |= i << 19;
                    tcg_gen_movi_i32(cpu_F0s, n);
                }
                break;
            case 15: /* extension space */
                switch (rn) {
                case 0: /* cpy */
                    /* no-op */
                    break;
                case 1: /* abs */
                    gen_vfp_abs(dp);
                    break;
                case 2: /* neg */
                    gen_vfp_neg(dp);
                    break;
                case 3: /* sqrt */
                    gen_vfp_sqrt(dp);
                    break;
                case 4: /* vcvtb.f32.f16 */
                    tmp = gen_vfp_mrs();
                    tcg_gen_ext16u_i32(tmp, tmp);
                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                    tcg_temp_free_i32(tmp);
                    break;
                case 5: /* vcvtt.f32.f16 */
                    tmp = gen_vfp_mrs();
                    tcg_gen_shri_i32(tmp, tmp, 16);
                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                    tcg_temp_free_i32(tmp);
                    break;
                case 6: /* vcvtb.f16.f32 */
                    tmp = tcg_temp_new_i32();
                    gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    gen_mov_F0_vreg(0, rd);
                    tmp2 = gen_vfp_mrs();
                    tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    gen_vfp_msr(tmp);
                    break;
                case 7: /* vcvtt.f16.f32 */
                    tmp = tcg_temp_new_i32();
                    gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp, tmp, 16);
                    gen_mov_F0_vreg(0, rd);
                    tmp2 = gen_vfp_mrs();
                    tcg_gen_ext16u_i32(tmp2, tmp2);
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    gen_vfp_msr(tmp);
                    break;
                case 8: /* cmp */
                    gen_vfp_cmp(dp);
                    break;
                case 9: /* cmpe */
                    gen_vfp_cmpe(dp);
                    break;
                case 10: /* cmpz */
                    gen_vfp_cmp(dp);
                    break;
                case 11: /* cmpez */
                    gen_vfp_F1_ld0(dp);
                    gen_vfp_cmpe(dp);
                    break;
                case 15: /* single<->double conversion */
                    if (dp)
                        gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                    else
                        gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                    break;
                case 16: /* fuito */
                    gen_vfp_uito(dp, 0);
                    break;
                case 17: /* fsito */
                    gen_vfp_sito(dp, 0);
                    break;
                case 20: /* fshto */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_shto(dp, 16 - rm, 0);
                    break;
                case 21: /* fslto */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_slto(dp, 32 - rm, 0);
                    break;
                case 22: /* fuhto */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_uhto(dp, 16 - rm, 0);
                    break;
                case 23: /* fulto */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_ulto(dp, 32 - rm, 0);
                    break;
                case 24: /* ftoui */
                    gen_vfp_toui(dp, 0);
                    break;
                case 25: /* ftouiz */
                    gen_vfp_touiz(dp, 0);
                    break;
                case 26: /* ftosi */
                    gen_vfp_tosi(dp, 0);
                    break;
                case 27: /* ftosiz */
                    gen_vfp_tosiz(dp, 0);
                    break;
                case 28: /* ftosh */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_tosh(dp, 16 - rm, 0);
                    break;
                case 29: /* ftosl */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_tosl(dp, 32 - rm, 0);
                    break;
                case 30: /* ftouh */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_touh(dp, 16 - rm, 0);
                    break;
                case 31: /* ftoul */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;
                    gen_vfp_toul(dp, 32 - rm, 0);
                    break;
                default: /* undefined */
                    return 1;
                }
                break;
            default: /* undefined */
                return 1;
            }
            /* Write back the result.  */
            if (op == 15 && (rn >= 8 && rn <= 11))
                ; /* Comparison, do nothing.  */
            else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                /* VCVT double to int: always integer result. */
                gen_mov_vreg_F0(0, rd);
            else if (op == 15 && rn == 15)
                /* conversion */
                gen_mov_vreg_F0(!dp, rd);
            else
                gen_mov_vreg_F0(dp, rd);

            /* break out of the loop if we have finished  */
            if (veclen == 0)
                break;

            if (op == 15 && delta_m == 0) {
                /* single source one-many */
                while (veclen--) {
                    rd = ((rd + delta_d) & (bank_mask - 1))
                         | (rd & bank_mask);
                    gen_mov_vreg_F0(dp, rd);
                }
                break;
            }
            /* Setup the next operands.  */
            veclen--;
            rd = ((rd + delta_d) & (bank_mask - 1))
                 | (rd & bank_mask);

            if (op == 15) {
                /* One source operand.  */
                rm = ((rm + delta_m) & (bank_mask - 1))
                     | (rm & bank_mask);
                gen_mov_F0_vreg(dp, rm);
            } else {
                /* Two source operands.  */
                rn = ((rn + delta_d) & (bank_mask - 1))
                     | (rn & bank_mask);
                gen_mov_F0_vreg(dp, rn);
                if (delta_m) {
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F1_vreg(dp, rm);
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
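/* The non-zero value handed to tcg_gen_exit_tb() is the TB pointer with the
 * jump slot index n (0 or 1) folded into its low bits; the execution loop
 * uses it to patch the goto_tb branch so the two blocks chain directly.
 * Chaining is only attempted when source and destination share a guest
 * page, hence the TARGET_PAGE_MASK comparison above.
 */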
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
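/* Helper for the signed 16x16->32 multiplies (SMLAxy, SMULxy, ...): x and y
 * pick the top (1) or bottom (0) halfword of each operand.  E.g. for
 * SMULTB, x == 1 and y == 0, so t0 is arithmetically shifted right by 16
 * while t1 is sign-extended from its low halfword before the multiply.
 */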
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
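/* Example: for MSR with the "fs" field specification, flags has bits 3 (f,
 * CPSR[31:24]) and 2 (s, CPSR[23:16]) set, giving an initial mask of
 * 0xffff0000 before the feature and privilege based bits are cleared.
 */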
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}

static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
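/* Expansion example: with size == 1 and u == 0 the selector (size << 1) | u
 * is 2, so GEN_NEON_INTEGER_OP(hadd) becomes
 * gen_helper_neon_hadd_s16(tmp, tmp, tmp2) -- the signed 16-bit variant.
 */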
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
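/* Worked example for gen_neon_trn_u8: with t0 = 0xa3a2a1a0 and
 * t1 = 0xb3b2b1b0 the outputs are t0 = 0xa2b2a0b0 and t1 = 0xa3b3a1b1,
 * i.e. the even-numbered bytes of both inputs gather in t0 and the
 * odd-numbered bytes in t1 -- one 32-bit step of the VTRN.8 transpose.
 */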
/* For each "load/store all elements" op value: how many registers are
   accessed, the element interleave factor and the register spacing.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
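/* Post-indexed addressing follows the usual NEON convention handled above:
 * rm == 15 means no writeback, rm == 13 post-increments the base register
 * by the transfer size (the "[rn]!" form), and any other rm adds that
 * register to the base (the "[rn], rm" form).
 */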
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
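/* The select is per-bit, e.g. c = 0xffff0000, t = 0x11223344 and
 * f = 0x55667788 give dest = 0x11227788: bits where c is set come from t,
 * the rest from f.  VBSL, VBIT and VBIF all map onto this helper with the
 * operands permuted.
 */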
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
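/* The (op, u) pair in gen_neon_narrow_op selects the flavour:
 *   op set, u set:     saturate a signed source to an unsigned result
 *   op set, u clear:   plain truncating narrow
 *   op clear, u set:   unsigned saturating narrow
 *   op clear, u clear: signed saturating narrow
 */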
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
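/* Each entry above is a bitmask of permitted size field values: bit n set
 * means size == n is accepted.  E.g. 0x7 allows the 8/16/32-bit element
 * forms but UNDEFs for size == 3, while 0xf also accepts the 64-bit form.
 */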
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VADD_VSUB:
                    if (u) {
                        tcg_gen_sub_i64(CPU_V001);
                    } else {
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD:
            if (u) {
                return 1;
            }
            /* Fall through */
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_VRECPS_VRSQRTS:
            if (u) {
                return 1;
            }
            break;
        case NEON_3R_VMUL:
            if (u && (size != 0)) {
                /* UNDEF on invalid size for polynomial subcase */
                return 1;
            }
            break;
        case NEON_3R_VFM:
            if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VQADD:
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_LOGIC: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    tcg_gen_and_i32(tmp, tmp, tmp2);
                    break;
                case 1: /* VBIC */
                    tcg_gen_andc_i32(tmp, tmp, tmp2);
                    break;
                case 2: /* VORR */
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    break;
                case 3: /* VORN */
                    tcg_gen_orc_i32(tmp, tmp, tmp2);
                    break;
                case 4: /* VEOR */
                    tcg_gen_xor_i32(tmp, tmp, tmp2);
                    break;
                case 5: /* VBSL */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                    tcg_temp_free_i32(tmp3);
                    break;
                case 6: /* VBIT */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                case 7: /* VBIF */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VQSUB:
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case NEON_3R_VCGT:
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case NEON_3R_VCGE:
                GEN_NEON_INTEGER_OP(cge);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VMAX:
                GEN_NEON_INTEGER_OP(max);
                break;
            case NEON_3R_VMIN:
                GEN_NEON_INTEGER_OP(min);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VADD_VSUB:
                if (!u) { /* VADD */
                    gen_neon_add(size, tmp, tmp2);
                } else { /* VSUB */
                    switch (size) {
                    case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VTST_VCEQ:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size, tmp, tmp2);
                } else { /* VMLA */
                    gen_neon_add(size, tmp, tmp2);
                }
                break;
            case NEON_3R_VMUL:
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_VRECPS_VRSQRTS:
                if (size == 0)
                    gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                else
                    gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                break;
            case NEON_3R_VFM:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands.  */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }
        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift. */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            neon_load_reg64(cpu_V1, rd + pass);
                            uint64_t mask;
                            if (shift < -63 || shift > 63) {
                                mask = 0;
                            } else {
                                if (op == 4) {
                                    mask = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;
            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert)
                    imm = ~imm;
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = tcg_temp_new_i32();
                    if (op == 14 && invert) {
                        int n;
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size != 0
                 *           bit 1 : UNDEF if size == 0
                 *           bit 2 : UNDEF if U == 1
                 * Note that [1:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 6}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 6}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 2}, /* VQDMULL */
                    {0, 0, 0, 5}, /* Polynomial VMULL */
                    {0, 0, 0, 3}, /* Reserved: always UNDEF */
                };
5349 prewiden
= neon_3reg_wide
[op
][0];
5350 src1_wide
= neon_3reg_wide
[op
][1];
5351 src2_wide
= neon_3reg_wide
[op
][2];
5352 undefreq
= neon_3reg_wide
[op
][3];
5354 if (((undefreq
& 1) && (size
!= 0)) ||
5355 ((undefreq
& 2) && (size
== 0)) ||
5356 ((undefreq
& 4) && u
)) {
5359 if ((src1_wide
&& (rn
& 1)) ||
5360 (src2_wide
&& (rm
& 1)) ||
5361 (!src2_wide
&& (rd
& 1))) {
5365 /* Avoid overlapping operands. Wide source operands are
5366 always aligned so will never overlap with wide
5367 destinations in problematic ways. */
5368 if (rd
== rm
&& !src2_wide
) {
5369 tmp
= neon_load_reg(rm
, 1);
5370 neon_store_scratch(2, tmp
);
5371 } else if (rd
== rn
&& !src1_wide
) {
5372 tmp
= neon_load_reg(rn
, 1);
5373 neon_store_scratch(2, tmp
);
5376 for (pass
= 0; pass
< 2; pass
++) {
5378 neon_load_reg64(cpu_V0
, rn
+ pass
);
5381 if (pass
== 1 && rd
== rn
) {
5382 tmp
= neon_load_scratch(2);
5384 tmp
= neon_load_reg(rn
, pass
);
5387 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5391 neon_load_reg64(cpu_V1
, rm
+ pass
);
5394 if (pass
== 1 && rd
== rm
) {
5395 tmp2
= neon_load_scratch(2);
5397 tmp2
= neon_load_reg(rm
, pass
);
5400 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5404 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5405 gen_neon_addl(size
);
5407 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5408 gen_neon_subl(size
);
5410 case 5: case 7: /* VABAL, VABDL */
5411 switch ((size
<< 1) | u
) {
5413 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5416 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5419 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5422 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5425 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5428 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5432 tcg_temp_free_i32(tmp2
);
5433 tcg_temp_free_i32(tmp
);
5435 case 8: case 9: case 10: case 11: case 12: case 13:
5436 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5437 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5439 case 14: /* Polynomial VMULL */
5440 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5441 tcg_temp_free_i32(tmp2
);
5442 tcg_temp_free_i32(tmp
);
5444 default: /* 15 is RESERVED: caught earlier */
5449 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5450 neon_store_reg64(cpu_V0
, rd
+ pass
);
5451 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5453 neon_load_reg64(cpu_V1
, rd
+ pass
);
5455 case 10: /* VMLSL */
5456 gen_neon_negl(cpu_V0
, size
);
5458 case 5: case 8: /* VABAL, VMLAL */
5459 gen_neon_addl(size
);
5461 case 9: case 11: /* VQDMLAL, VQDMLSL */
5462 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5464 gen_neon_negl(cpu_V0
, size
);
5466 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5471 neon_store_reg64(cpu_V0
, rd
+ pass
);
5472 } else if (op
== 4 || op
== 6) {
5473 /* Narrowing operation. */
5474 tmp
= tcg_temp_new_i32();
5478 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5481 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5484 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5485 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5492 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5495 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5498 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5499 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5500 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5508 neon_store_reg(rd
, 0, tmp3
);
5509 neon_store_reg(rd
, 1, tmp
);
5512 /* Write back the result. */
5513 neon_store_reg64(cpu_V0
, rd
+ pass
);
5517 /* Two registers and a scalar. NB that for ops of this form
5518 * the ARM ARM labels bit 24 as Q, but it is in our variable
5525 case 1: /* Float VMLA scalar */
5526 case 5: /* Floating point VMLS scalar */
5527 case 9: /* Floating point VMUL scalar */
5532 case 0: /* Integer VMLA scalar */
5533 case 4: /* Integer VMLS scalar */
5534 case 8: /* Integer VMUL scalar */
5535 case 12: /* VQDMULH scalar */
5536 case 13: /* VQRDMULH scalar */
5537 if (u
&& ((rd
| rn
) & 1)) {
5540 tmp
= neon_get_scalar(size
, rm
);
5541 neon_store_scratch(0, tmp
);
5542 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5543 tmp
= neon_load_scratch(0);
5544 tmp2
= neon_load_reg(rn
, pass
);
5547 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5549 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5551 } else if (op
== 13) {
5553 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5555 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5557 } else if (op
& 1) {
5558 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5559 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5560 tcg_temp_free_ptr(fpstatus
);
5563 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5564 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5565 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5569 tcg_temp_free_i32(tmp2
);
5572 tmp2
= neon_load_reg(rd
, pass
);
5575 gen_neon_add(size
, tmp
, tmp2
);
5579 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5580 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5581 tcg_temp_free_ptr(fpstatus
);
5585 gen_neon_rsb(size
, tmp
, tmp2
);
5589 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5590 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5591 tcg_temp_free_ptr(fpstatus
);
5597 tcg_temp_free_i32(tmp2
);
5599 neon_store_reg(rd
, pass
, tmp
);
5602 case 3: /* VQDMLAL scalar */
5603 case 7: /* VQDMLSL scalar */
5604 case 11: /* VQDMULL scalar */
5609 case 2: /* VMLAL sclar */
5610 case 6: /* VMLSL scalar */
5611 case 10: /* VMULL scalar */
5615 tmp2
= neon_get_scalar(size
, rm
);
5616 /* We need a copy of tmp2 because gen_neon_mull
5617 * deletes it during pass 0. */
5618 tmp4
= tcg_temp_new_i32();
5619 tcg_gen_mov_i32(tmp4
, tmp2
);
5620 tmp3
= neon_load_reg(rn
, 1);
5622 for (pass
= 0; pass
< 2; pass
++) {
5624 tmp
= neon_load_reg(rn
, 0);
5629 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5631 neon_load_reg64(cpu_V1
, rd
+ pass
);
5635 gen_neon_negl(cpu_V0
, size
);
5638 gen_neon_addl(size
);
5641 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5643 gen_neon_negl(cpu_V0
, size
);
5645 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5651 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5656 neon_store_reg64(cpu_V0
, rd
+ pass
);
5661 default: /* 14 and 15 are RESERVED */
5665 } else { /* size == 3 */
5668 imm
= (insn
>> 8) & 0xf;
5673 if (q
&& ((rd
| rn
| rm
) & 1)) {
5678 neon_load_reg64(cpu_V0
, rn
);
5680 neon_load_reg64(cpu_V1
, rn
+ 1);
5682 } else if (imm
== 8) {
5683 neon_load_reg64(cpu_V0
, rn
+ 1);
5685 neon_load_reg64(cpu_V1
, rm
);
5688 tmp64
= tcg_temp_new_i64();
5690 neon_load_reg64(cpu_V0
, rn
);
5691 neon_load_reg64(tmp64
, rn
+ 1);
5693 neon_load_reg64(cpu_V0
, rn
+ 1);
5694 neon_load_reg64(tmp64
, rm
);
5696 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5697 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5698 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5700 neon_load_reg64(cpu_V1
, rm
);
5702 neon_load_reg64(cpu_V1
, rm
+ 1);
5705 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5706 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5707 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5708 tcg_temp_free_i64(tmp64
);
5711 neon_load_reg64(cpu_V0
, rn
);
5712 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5713 neon_load_reg64(cpu_V1
, rm
);
5714 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5715 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5717 neon_store_reg64(cpu_V0
, rd
);
5719 neon_store_reg64(cpu_V1
, rd
+ 1);
5721 } else if ((insn
& (1 << 11)) == 0) {
5722 /* Two register misc. */
5723 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5724 size
= (insn
>> 18) & 3;
5725 /* UNDEF for unknown op values and bad op-size combinations */
5726 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
5729 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
5730 q
&& ((rm
| rd
) & 1)) {
5734 case NEON_2RM_VREV64
:
5735 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5736 tmp
= neon_load_reg(rm
, pass
* 2);
5737 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5739 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5740 case 1: gen_swap_half(tmp
); break;
5741 case 2: /* no-op */ break;
5744 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5746 neon_store_reg(rd
, pass
* 2, tmp2
);
5749 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5750 case 1: gen_swap_half(tmp2
); break;
5753 neon_store_reg(rd
, pass
* 2, tmp2
);
5757 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
5758 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
5759 for (pass
= 0; pass
< q
+ 1; pass
++) {
5760 tmp
= neon_load_reg(rm
, pass
* 2);
5761 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5762 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5763 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5765 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5766 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5767 case 2: tcg_gen_add_i64(CPU_V001
); break;
5770 if (op
>= NEON_2RM_VPADAL
) {
5772 neon_load_reg64(cpu_V1
, rd
+ pass
);
5773 gen_neon_addl(size
);
5775 neon_store_reg64(cpu_V0
, rd
+ pass
);
5781 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5782 tmp
= neon_load_reg(rm
, n
);
5783 tmp2
= neon_load_reg(rd
, n
+ 1);
5784 neon_store_reg(rm
, n
, tmp2
);
5785 neon_store_reg(rd
, n
+ 1, tmp
);
5792 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5797 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5801 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
5802 /* also VQMOVUN; op field and mnemonics don't line up */
5807 for (pass
= 0; pass
< 2; pass
++) {
5808 neon_load_reg64(cpu_V0
, rm
+ pass
);
5809 tmp
= tcg_temp_new_i32();
5810 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
5815 neon_store_reg(rd
, 0, tmp2
);
5816 neon_store_reg(rd
, 1, tmp
);
5820 case NEON_2RM_VSHLL
:
5821 if (q
|| (rd
& 1)) {
5824 tmp
= neon_load_reg(rm
, 0);
5825 tmp2
= neon_load_reg(rm
, 1);
5826 for (pass
= 0; pass
< 2; pass
++) {
5829 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5830 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5831 neon_store_reg64(cpu_V0
, rd
+ pass
);
5834 case NEON_2RM_VCVT_F16_F32
:
5835 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5839 tmp
= tcg_temp_new_i32();
5840 tmp2
= tcg_temp_new_i32();
5841 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5842 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5843 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5844 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5845 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5846 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5847 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5848 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5849 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5850 neon_store_reg(rd
, 0, tmp2
);
5851 tmp2
= tcg_temp_new_i32();
5852 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5853 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5854 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5855 neon_store_reg(rd
, 1, tmp2
);
5856 tcg_temp_free_i32(tmp
);
5858 case NEON_2RM_VCVT_F32_F16
:
5859 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5863 tmp3
= tcg_temp_new_i32();
5864 tmp
= neon_load_reg(rm
, 0);
5865 tmp2
= neon_load_reg(rm
, 1);
5866 tcg_gen_ext16u_i32(tmp3
, tmp
);
5867 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5868 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5869 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5870 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5871 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5872 tcg_temp_free_i32(tmp
);
5873 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5874 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5875 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5876 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5877 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5878 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5879 tcg_temp_free_i32(tmp2
);
5880 tcg_temp_free_i32(tmp3
);
5884 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5885 if (neon_2rm_is_float_op(op
)) {
5886 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5887 neon_reg_offset(rm
, pass
));
5890 tmp
= neon_load_reg(rm
, pass
);
5893 case NEON_2RM_VREV32
:
5895 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5896 case 1: gen_swap_half(tmp
); break;
5900 case NEON_2RM_VREV16
:
5905 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5906 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5907 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5913 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5914 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5915 case 2: gen_helper_clz(tmp
, tmp
); break;
5920 gen_helper_neon_cnt_u8(tmp
, tmp
);
5923 tcg_gen_not_i32(tmp
, tmp
);
5925 case NEON_2RM_VQABS
:
5928 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
5931 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
5934 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
5939 case NEON_2RM_VQNEG
:
5942 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
5945 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
5948 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
5953 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
5954 tmp2
= tcg_const_i32(0);
5956 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5957 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5958 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5961 tcg_temp_free(tmp2
);
5962 if (op
== NEON_2RM_VCLE0
) {
5963 tcg_gen_not_i32(tmp
, tmp
);
5966 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
5967 tmp2
= tcg_const_i32(0);
5969 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5970 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5971 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5974 tcg_temp_free(tmp2
);
5975 if (op
== NEON_2RM_VCLT0
) {
5976 tcg_gen_not_i32(tmp
, tmp
);
5979 case NEON_2RM_VCEQ0
:
5980 tmp2
= tcg_const_i32(0);
5982 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5983 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5984 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5987 tcg_temp_free(tmp2
);
5991 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5992 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5993 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5998 tmp2
= tcg_const_i32(0);
5999 gen_neon_rsb(size
, tmp
, tmp2
);
6000 tcg_temp_free(tmp2
);
6002 case NEON_2RM_VCGT0_F
:
6004 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6005 tmp2
= tcg_const_i32(0);
6006 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6007 tcg_temp_free(tmp2
);
6008 tcg_temp_free_ptr(fpstatus
);
6011 case NEON_2RM_VCGE0_F
:
6013 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6014 tmp2
= tcg_const_i32(0);
6015 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6016 tcg_temp_free(tmp2
);
6017 tcg_temp_free_ptr(fpstatus
);
6020 case NEON_2RM_VCEQ0_F
:
6022 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6023 tmp2
= tcg_const_i32(0);
6024 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6025 tcg_temp_free(tmp2
);
6026 tcg_temp_free_ptr(fpstatus
);
6029 case NEON_2RM_VCLE0_F
:
6031 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6032 tmp2
= tcg_const_i32(0);
6033 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6034 tcg_temp_free(tmp2
);
6035 tcg_temp_free_ptr(fpstatus
);
6038 case NEON_2RM_VCLT0_F
:
6040 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6041 tmp2
= tcg_const_i32(0);
6042 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6043 tcg_temp_free(tmp2
);
6044 tcg_temp_free_ptr(fpstatus
);
6047 case NEON_2RM_VABS_F
:
6050 case NEON_2RM_VNEG_F
:
6054 tmp2
= neon_load_reg(rd
, pass
);
6055 neon_store_reg(rm
, pass
, tmp2
);
6058 tmp2
= neon_load_reg(rd
, pass
);
6060 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6061 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6064 neon_store_reg(rm
, pass
, tmp2
);
6066 case NEON_2RM_VRECPE
:
6067 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
6069 case NEON_2RM_VRSQRTE
:
6070 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
6072 case NEON_2RM_VRECPE_F
:
6073 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6075 case NEON_2RM_VRSQRTE_F
:
6076 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6078 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6081 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6084 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6085 gen_vfp_tosiz(0, 1);
6087 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6088 gen_vfp_touiz(0, 1);
6091 /* Reserved op values were caught by the
6092 * neon_2rm_sizes[] check earlier.
6096 if (neon_2rm_is_float_op(op
)) {
6097 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6098 neon_reg_offset(rd
, pass
));
6100 neon_store_reg(rd
, pass
, tmp
);
6105 } else if ((insn
& (1 << 10)) == 0) {
6107 int n
= ((insn
>> 8) & 3) + 1;
6108 if ((rn
+ n
) > 32) {
6109 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6110 * helper function running off the end of the register file.
6115 if (insn
& (1 << 6)) {
6116 tmp
= neon_load_reg(rd
, 0);
6118 tmp
= tcg_temp_new_i32();
6119 tcg_gen_movi_i32(tmp
, 0);
6121 tmp2
= neon_load_reg(rm
, 0);
6122 tmp4
= tcg_const_i32(rn
);
6123 tmp5
= tcg_const_i32(n
);
6124 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
6125 tcg_temp_free_i32(tmp
);
6126 if (insn
& (1 << 6)) {
6127 tmp
= neon_load_reg(rd
, 1);
6129 tmp
= tcg_temp_new_i32();
6130 tcg_gen_movi_i32(tmp
, 0);
6132 tmp3
= neon_load_reg(rm
, 1);
6133 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
6134 tcg_temp_free_i32(tmp5
);
6135 tcg_temp_free_i32(tmp4
);
6136 neon_store_reg(rd
, 0, tmp2
);
6137 neon_store_reg(rd
, 1, tmp3
);
6138 tcg_temp_free_i32(tmp
);
6139 } else if ((insn
& 0x380) == 0) {
6141 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6144 if (insn
& (1 << 19)) {
6145 tmp
= neon_load_reg(rm
, 1);
6147 tmp
= neon_load_reg(rm
, 0);
6149 if (insn
& (1 << 16)) {
6150 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6151 } else if (insn
& (1 << 17)) {
6152 if ((insn
>> 18) & 1)
6153 gen_neon_dup_high16(tmp
);
6155 gen_neon_dup_low16(tmp
);
6157 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6158 tmp2
= tcg_temp_new_i32();
6159 tcg_gen_mov_i32(tmp2
, tmp
);
6160 neon_store_reg(rd
, pass
, tmp2
);
6162 tcg_temp_free_i32(tmp
);
6171 static int disas_coproc_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
6173 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
6174 const ARMCPRegInfo
*ri
;
6175 ARMCPU
*cpu
= arm_env_get_cpu(env
);
6177 cpnum
= (insn
>> 8) & 0xf;
6178 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
6179 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
6182 /* First check for coprocessor space used for actual instructions */
6186 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6187 return disas_iwmmxt_insn(env
, s
, insn
);
6188 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
6189 return disas_dsp_insn(env
, s
, insn
);
6194 return disas_vfp_insn (env
, s
, insn
);
6199 /* Otherwise treat as a generic register access */
6200 is64
= (insn
& (1 << 25)) == 0;
6201 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
6209 opc1
= (insn
>> 4) & 0xf;
6211 rt2
= (insn
>> 16) & 0xf;
6213 crn
= (insn
>> 16) & 0xf;
6214 opc1
= (insn
>> 21) & 7;
6215 opc2
= (insn
>> 5) & 7;
6218 isread
= (insn
>> 20) & 1;
6219 rt
= (insn
>> 12) & 0xf;
6221 ri
= get_arm_cp_reginfo(cpu
,
6222 ENCODE_CP_REG(cpnum
, is64
, crn
, crm
, opc1
, opc2
));
6224 /* Check access permissions */
6225 if (!cp_access_ok(env
, ri
, isread
)) {
6229 /* Handle special cases first */
6230 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
6237 gen_set_pc_im(s
->pc
);
6238 s
->is_jmp
= DISAS_WFI
;
6249 if (ri
->type
& ARM_CP_CONST
) {
6250 tmp64
= tcg_const_i64(ri
->resetvalue
);
6251 } else if (ri
->readfn
) {
6253 gen_set_pc_im(s
->pc
);
6254 tmp64
= tcg_temp_new_i64();
6255 tmpptr
= tcg_const_ptr(ri
);
6256 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
6257 tcg_temp_free_ptr(tmpptr
);
6259 tmp64
= tcg_temp_new_i64();
6260 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6262 tmp
= tcg_temp_new_i32();
6263 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6264 store_reg(s
, rt
, tmp
);
6265 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6266 tmp
= tcg_temp_new_i32();
6267 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6268 tcg_temp_free_i64(tmp64
);
6269 store_reg(s
, rt2
, tmp
);
6272 if (ri
->type
& ARM_CP_CONST
) {
6273 tmp
= tcg_const_i32(ri
->resetvalue
);
6274 } else if (ri
->readfn
) {
6276 gen_set_pc_im(s
->pc
);
6277 tmp
= tcg_temp_new_i32();
6278 tmpptr
= tcg_const_ptr(ri
);
6279 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
6280 tcg_temp_free_ptr(tmpptr
);
6282 tmp
= load_cpu_offset(ri
->fieldoffset
);
6285 /* Destination register of r15 for 32 bit loads sets
6286 * the condition codes from the high 4 bits of the value
6289 tcg_temp_free_i32(tmp
);
6291 store_reg(s
, rt
, tmp
);
6296 if (ri
->type
& ARM_CP_CONST
) {
6297 /* If not forbidden by access permissions, treat as WI */
6303 TCGv_i64 tmp64
= tcg_temp_new_i64();
6304 tmplo
= load_reg(s
, rt
);
6305 tmphi
= load_reg(s
, rt2
);
6306 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
6307 tcg_temp_free_i32(tmplo
);
6308 tcg_temp_free_i32(tmphi
);
6310 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
6311 gen_set_pc_im(s
->pc
);
6312 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
6313 tcg_temp_free_ptr(tmpptr
);
6315 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6317 tcg_temp_free_i64(tmp64
);
6322 gen_set_pc_im(s
->pc
);
6323 tmp
= load_reg(s
, rt
);
6324 tmpptr
= tcg_const_ptr(ri
);
6325 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
6326 tcg_temp_free_ptr(tmpptr
);
6327 tcg_temp_free_i32(tmp
);
6329 TCGv tmp
= load_reg(s
, rt
);
6330 store_cpu_offset(tmp
, ri
->fieldoffset
);
6333 /* We default to ending the TB on a coprocessor register write,
6334 * but allow this to be suppressed by the register definition
6335 * (usually only necessary to work around guest bugs).
6337 if (!(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
6348 /* Store a 64-bit value to a register pair. Clobbers val. */
6349 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
6352 tmp
= tcg_temp_new_i32();
6353 tcg_gen_trunc_i64_i32(tmp
, val
);
6354 store_reg(s
, rlow
, tmp
);
6355 tmp
= tcg_temp_new_i32();
6356 tcg_gen_shri_i64(val
, val
, 32);
6357 tcg_gen_trunc_i64_i32(tmp
, val
);
6358 store_reg(s
, rhigh
, tmp
);
6361 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6362 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
6367 /* Load value and extend to 64 bits. */
6368 tmp
= tcg_temp_new_i64();
6369 tmp2
= load_reg(s
, rlow
);
6370 tcg_gen_extu_i32_i64(tmp
, tmp2
);
6371 tcg_temp_free_i32(tmp2
);
6372 tcg_gen_add_i64(val
, val
, tmp
);
6373 tcg_temp_free_i64(tmp
);
6376 /* load and add a 64-bit value from a register pair. */
6377 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
6383 /* Load 64-bit value rd:rn. */
6384 tmpl
= load_reg(s
, rlow
);
6385 tmph
= load_reg(s
, rhigh
);
6386 tmp
= tcg_temp_new_i64();
6387 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
6388 tcg_temp_free_i32(tmpl
);
6389 tcg_temp_free_i32(tmph
);
6390 tcg_gen_add_i64(val
, val
, tmp
);
6391 tcg_temp_free_i64(tmp
);
6394 /* Set N and Z flags from a 64-bit value. */
6395 static void gen_logicq_cc(TCGv_i64 val
)
6397 TCGv tmp
= tcg_temp_new_i32();
6398 gen_helper_logicq_cc(tmp
, val
);
6400 tcg_temp_free_i32(tmp
);
6403 /* Load/Store exclusive instructions are implemented by remembering
6404 the value/address loaded, and seeing if these are the same
6405 when the store is performed. This should be is sufficient to implement
6406 the architecturally mandated semantics, and avoids having to monitor
6409 In system emulation mode only one CPU will be running at once, so
6410 this sequence is effectively atomic. In user emulation mode we
6411 throw an exception and handle the atomic operation elsewhere. */
6412 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
6413 TCGv addr
, int size
)
6419 tmp
= gen_ld8u(addr
, IS_USER(s
));
6422 tmp
= gen_ld16u(addr
, IS_USER(s
));
6426 tmp
= gen_ld32(addr
, IS_USER(s
));
6431 tcg_gen_mov_i32(cpu_exclusive_val
, tmp
);
6432 store_reg(s
, rt
, tmp
);
6434 TCGv tmp2
= tcg_temp_new_i32();
6435 tcg_gen_addi_i32(tmp2
, addr
, 4);
6436 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6437 tcg_temp_free_i32(tmp2
);
6438 tcg_gen_mov_i32(cpu_exclusive_high
, tmp
);
6439 store_reg(s
, rt2
, tmp
);
6441 tcg_gen_mov_i32(cpu_exclusive_addr
, addr
);
6444 static void gen_clrex(DisasContext
*s
)
6446 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6449 #ifdef CONFIG_USER_ONLY
6450 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6451 TCGv addr
, int size
)
6453 tcg_gen_mov_i32(cpu_exclusive_test
, addr
);
6454 tcg_gen_movi_i32(cpu_exclusive_info
,
6455 size
| (rd
<< 4) | (rt
<< 8) | (rt2
<< 12));
6456 gen_exception_insn(s
, 4, EXCP_STREX
);
6459 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6460 TCGv addr
, int size
)
6466 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6472 fail_label
= gen_new_label();
6473 done_label
= gen_new_label();
6474 tcg_gen_brcond_i32(TCG_COND_NE
, addr
, cpu_exclusive_addr
, fail_label
);
6477 tmp
= gen_ld8u(addr
, IS_USER(s
));
6480 tmp
= gen_ld16u(addr
, IS_USER(s
));
6484 tmp
= gen_ld32(addr
, IS_USER(s
));
6489 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_val
, fail_label
);
6490 tcg_temp_free_i32(tmp
);
6492 TCGv tmp2
= tcg_temp_new_i32();
6493 tcg_gen_addi_i32(tmp2
, addr
, 4);
6494 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6495 tcg_temp_free_i32(tmp2
);
6496 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_high
, fail_label
);
6497 tcg_temp_free_i32(tmp
);
6499 tmp
= load_reg(s
, rt
);
6502 gen_st8(tmp
, addr
, IS_USER(s
));
6505 gen_st16(tmp
, addr
, IS_USER(s
));
6509 gen_st32(tmp
, addr
, IS_USER(s
));
6515 tcg_gen_addi_i32(addr
, addr
, 4);
6516 tmp
= load_reg(s
, rt2
);
6517 gen_st32(tmp
, addr
, IS_USER(s
));
6519 tcg_gen_movi_i32(cpu_R
[rd
], 0);
6520 tcg_gen_br(done_label
);
6521 gen_set_label(fail_label
);
6522 tcg_gen_movi_i32(cpu_R
[rd
], 1);
6523 gen_set_label(done_label
);
6524 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6528 static void disas_arm_insn(CPUARMState
* env
, DisasContext
*s
)
6530 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6537 insn
= arm_ldl_code(s
->pc
, s
->bswap_code
);
6540 /* M variants do not implement ARM mode. */
6545 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6546 * choose to UNDEF. In ARMv5 and above the space is used
6547 * for miscellaneous unconditional instructions.
6551 /* Unconditional instructions. */
6552 if (((insn
>> 25) & 7) == 1) {
6553 /* NEON Data processing. */
6554 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6557 if (disas_neon_data_insn(env
, s
, insn
))
6561 if ((insn
& 0x0f100000) == 0x04000000) {
6562 /* NEON load/store. */
6563 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6566 if (disas_neon_ls_insn(env
, s
, insn
))
6570 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6571 ((insn
& 0x0f30f010) == 0x0710f000)) {
6572 if ((insn
& (1 << 22)) == 0) {
6574 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6578 /* Otherwise PLD; v5TE+ */
6582 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6583 ((insn
& 0x0f70f010) == 0x0650f000)) {
6585 return; /* PLI; V7 */
6587 if (((insn
& 0x0f700000) == 0x04100000) ||
6588 ((insn
& 0x0f700010) == 0x06100000)) {
6589 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6592 return; /* v7MP: Unallocated memory hint: must NOP */
6595 if ((insn
& 0x0ffffdff) == 0x01010000) {
6598 if (((insn
>> 9) & 1) != s
->bswap_code
) {
6599 /* Dynamic endianness switching not implemented. */
6603 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6604 switch ((insn
>> 4) & 0xf) {
6613 /* We don't emulate caches so these are a no-op. */
6618 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6624 op1
= (insn
& 0x1f);
6625 addr
= tcg_temp_new_i32();
6626 tmp
= tcg_const_i32(op1
);
6627 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6628 tcg_temp_free_i32(tmp
);
6629 i
= (insn
>> 23) & 3;
6631 case 0: offset
= -4; break; /* DA */
6632 case 1: offset
= 0; break; /* IA */
6633 case 2: offset
= -8; break; /* DB */
6634 case 3: offset
= 4; break; /* IB */
6638 tcg_gen_addi_i32(addr
, addr
, offset
);
6639 tmp
= load_reg(s
, 14);
6640 gen_st32(tmp
, addr
, 0);
6641 tmp
= load_cpu_field(spsr
);
6642 tcg_gen_addi_i32(addr
, addr
, 4);
6643 gen_st32(tmp
, addr
, 0);
6644 if (insn
& (1 << 21)) {
6645 /* Base writeback. */
6647 case 0: offset
= -8; break;
6648 case 1: offset
= 4; break;
6649 case 2: offset
= -4; break;
6650 case 3: offset
= 0; break;
6654 tcg_gen_addi_i32(addr
, addr
, offset
);
6655 tmp
= tcg_const_i32(op1
);
6656 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6657 tcg_temp_free_i32(tmp
);
6658 tcg_temp_free_i32(addr
);
6660 tcg_temp_free_i32(addr
);
6663 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6669 rn
= (insn
>> 16) & 0xf;
6670 addr
= load_reg(s
, rn
);
6671 i
= (insn
>> 23) & 3;
6673 case 0: offset
= -4; break; /* DA */
6674 case 1: offset
= 0; break; /* IA */
6675 case 2: offset
= -8; break; /* DB */
6676 case 3: offset
= 4; break; /* IB */
6680 tcg_gen_addi_i32(addr
, addr
, offset
);
6681 /* Load PC into tmp and CPSR into tmp2. */
6682 tmp
= gen_ld32(addr
, 0);
6683 tcg_gen_addi_i32(addr
, addr
, 4);
6684 tmp2
= gen_ld32(addr
, 0);
6685 if (insn
& (1 << 21)) {
6686 /* Base writeback. */
6688 case 0: offset
= -8; break;
6689 case 1: offset
= 4; break;
6690 case 2: offset
= -4; break;
6691 case 3: offset
= 0; break;
6695 tcg_gen_addi_i32(addr
, addr
, offset
);
6696 store_reg(s
, rn
, addr
);
6698 tcg_temp_free_i32(addr
);
6700 gen_rfe(s
, tmp
, tmp2
);
6702 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6703 /* branch link and change to thumb (blx <offset>) */
6706 val
= (uint32_t)s
->pc
;
6707 tmp
= tcg_temp_new_i32();
6708 tcg_gen_movi_i32(tmp
, val
);
6709 store_reg(s
, 14, tmp
);
6710 /* Sign-extend the 24-bit offset */
6711 offset
= (((int32_t)insn
) << 8) >> 8;
6712 /* offset * 4 + bit24 * 2 + (thumb bit) */
6713 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6714 /* pipeline offset */
6716 /* protected by ARCH(5); above, near the start of uncond block */
6719 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6720 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6721 /* iWMMXt register transfer. */
6722 if (env
->cp15
.c15_cpar
& (1 << 1))
6723 if (!disas_iwmmxt_insn(env
, s
, insn
))
6726 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6727 /* Coprocessor double register transfer. */
6729 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6730 /* Additional coprocessor register transfer. */
6731 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6734 /* cps (privileged) */
6738 if (insn
& (1 << 19)) {
6739 if (insn
& (1 << 8))
6741 if (insn
& (1 << 7))
6743 if (insn
& (1 << 6))
6745 if (insn
& (1 << 18))
6748 if (insn
& (1 << 17)) {
6750 val
|= (insn
& 0x1f);
6753 gen_set_psr_im(s
, mask
, 0, val
);
6760 /* if not always execute, we generate a conditional jump to
6762 s
->condlabel
= gen_new_label();
6763 gen_test_cc(cond
^ 1, s
->condlabel
);
6766 if ((insn
& 0x0f900000) == 0x03000000) {
6767 if ((insn
& (1 << 21)) == 0) {
6769 rd
= (insn
>> 12) & 0xf;
6770 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6771 if ((insn
& (1 << 22)) == 0) {
6773 tmp
= tcg_temp_new_i32();
6774 tcg_gen_movi_i32(tmp
, val
);
6777 tmp
= load_reg(s
, rd
);
6778 tcg_gen_ext16u_i32(tmp
, tmp
);
6779 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6781 store_reg(s
, rd
, tmp
);
6783 if (((insn
>> 12) & 0xf) != 0xf)
6785 if (((insn
>> 16) & 0xf) == 0) {
6786 gen_nop_hint(s
, insn
& 0xff);
6788 /* CPSR = immediate */
6790 shift
= ((insn
>> 8) & 0xf) * 2;
6792 val
= (val
>> shift
) | (val
<< (32 - shift
));
6793 i
= ((insn
& (1 << 22)) != 0);
6794 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6798 } else if ((insn
& 0x0f900000) == 0x01000000
6799 && (insn
& 0x00000090) != 0x00000090) {
6800 /* miscellaneous instructions */
6801 op1
= (insn
>> 21) & 3;
6802 sh
= (insn
>> 4) & 0xf;
6805 case 0x0: /* move program status register */
6808 tmp
= load_reg(s
, rm
);
6809 i
= ((op1
& 2) != 0);
6810 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6814 rd
= (insn
>> 12) & 0xf;
6818 tmp
= load_cpu_field(spsr
);
6820 tmp
= tcg_temp_new_i32();
6821 gen_helper_cpsr_read(tmp
);
6823 store_reg(s
, rd
, tmp
);
6828 /* branch/exchange thumb (bx). */
6830 tmp
= load_reg(s
, rm
);
6832 } else if (op1
== 3) {
6835 rd
= (insn
>> 12) & 0xf;
6836 tmp
= load_reg(s
, rm
);
6837 gen_helper_clz(tmp
, tmp
);
6838 store_reg(s
, rd
, tmp
);
6846 /* Trivial implementation equivalent to bx. */
6847 tmp
= load_reg(s
, rm
);
6858 /* branch link/exchange thumb (blx) */
6859 tmp
= load_reg(s
, rm
);
6860 tmp2
= tcg_temp_new_i32();
6861 tcg_gen_movi_i32(tmp2
, s
->pc
);
6862 store_reg(s
, 14, tmp2
);
6865 case 0x5: /* saturating add/subtract */
6867 rd
= (insn
>> 12) & 0xf;
6868 rn
= (insn
>> 16) & 0xf;
6869 tmp
= load_reg(s
, rm
);
6870 tmp2
= load_reg(s
, rn
);
6872 gen_helper_double_saturate(tmp2
, tmp2
);
6874 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6876 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6877 tcg_temp_free_i32(tmp2
);
6878 store_reg(s
, rd
, tmp
);
6881 /* SMC instruction (op1 == 3)
6882 and undefined instructions (op1 == 0 || op1 == 2)
6889 gen_exception_insn(s
, 4, EXCP_BKPT
);
6891 case 0x8: /* signed multiply */
6896 rs
= (insn
>> 8) & 0xf;
6897 rn
= (insn
>> 12) & 0xf;
6898 rd
= (insn
>> 16) & 0xf;
6900 /* (32 * 16) >> 16 */
6901 tmp
= load_reg(s
, rm
);
6902 tmp2
= load_reg(s
, rs
);
6904 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6907 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6908 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6909 tmp
= tcg_temp_new_i32();
6910 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6911 tcg_temp_free_i64(tmp64
);
6912 if ((sh
& 2) == 0) {
6913 tmp2
= load_reg(s
, rn
);
6914 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6915 tcg_temp_free_i32(tmp2
);
6917 store_reg(s
, rd
, tmp
);
6920 tmp
= load_reg(s
, rm
);
6921 tmp2
= load_reg(s
, rs
);
6922 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6923 tcg_temp_free_i32(tmp2
);
6925 tmp64
= tcg_temp_new_i64();
6926 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6927 tcg_temp_free_i32(tmp
);
6928 gen_addq(s
, tmp64
, rn
, rd
);
6929 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6930 tcg_temp_free_i64(tmp64
);
6933 tmp2
= load_reg(s
, rn
);
6934 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6935 tcg_temp_free_i32(tmp2
);
6937 store_reg(s
, rd
, tmp
);
6944 } else if (((insn
& 0x0e000000) == 0 &&
6945 (insn
& 0x00000090) != 0x90) ||
6946 ((insn
& 0x0e000000) == (1 << 25))) {
6947 int set_cc
, logic_cc
, shiftop
;
6949 op1
= (insn
>> 21) & 0xf;
6950 set_cc
= (insn
>> 20) & 1;
6951 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6953 /* data processing instruction */
6954 if (insn
& (1 << 25)) {
6955 /* immediate operand */
6957 shift
= ((insn
>> 8) & 0xf) * 2;
6959 val
= (val
>> shift
) | (val
<< (32 - shift
));
6961 tmp2
= tcg_temp_new_i32();
6962 tcg_gen_movi_i32(tmp2
, val
);
6963 if (logic_cc
&& shift
) {
6964 gen_set_CF_bit31(tmp2
);
6969 tmp2
= load_reg(s
, rm
);
6970 shiftop
= (insn
>> 5) & 3;
6971 if (!(insn
& (1 << 4))) {
6972 shift
= (insn
>> 7) & 0x1f;
6973 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6975 rs
= (insn
>> 8) & 0xf;
6976 tmp
= load_reg(s
, rs
);
6977 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6980 if (op1
!= 0x0f && op1
!= 0x0d) {
6981 rn
= (insn
>> 16) & 0xf;
6982 tmp
= load_reg(s
, rn
);
6986 rd
= (insn
>> 12) & 0xf;
6989 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6993 store_reg_bx(env
, s
, rd
, tmp
);
6996 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7000 store_reg_bx(env
, s
, rd
, tmp
);
7003 if (set_cc
&& rd
== 15) {
7004 /* SUBS r15, ... is used for exception return. */
7008 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7009 gen_exception_return(s
, tmp
);
7012 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7014 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7016 store_reg_bx(env
, s
, rd
, tmp
);
7021 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
7023 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7025 store_reg_bx(env
, s
, rd
, tmp
);
7029 gen_helper_add_cc(tmp
, tmp
, tmp2
);
7031 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7033 store_reg_bx(env
, s
, rd
, tmp
);
7037 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
7039 gen_add_carry(tmp
, tmp
, tmp2
);
7041 store_reg_bx(env
, s
, rd
, tmp
);
7045 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
7047 gen_sub_carry(tmp
, tmp
, tmp2
);
7049 store_reg_bx(env
, s
, rd
, tmp
);
7053 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
7055 gen_sub_carry(tmp
, tmp2
, tmp
);
7057 store_reg_bx(env
, s
, rd
, tmp
);
7061 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7064 tcg_temp_free_i32(tmp
);
7068 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7071 tcg_temp_free_i32(tmp
);
7075 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7077 tcg_temp_free_i32(tmp
);
7081 gen_helper_add_cc(tmp
, tmp
, tmp2
);
7083 tcg_temp_free_i32(tmp
);
7086 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7090 store_reg_bx(env
, s
, rd
, tmp
);
7093 if (logic_cc
&& rd
== 15) {
7094 /* MOVS r15, ... is used for exception return. */
7098 gen_exception_return(s
, tmp2
);
7103 store_reg_bx(env
, s
, rd
, tmp2
);
7107 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
7111 store_reg_bx(env
, s
, rd
, tmp
);
7115 tcg_gen_not_i32(tmp2
, tmp2
);
7119 store_reg_bx(env
, s
, rd
, tmp2
);
7122 if (op1
!= 0x0f && op1
!= 0x0d) {
7123 tcg_temp_free_i32(tmp2
);
7126 /* other instructions */
7127 op1
= (insn
>> 24) & 0xf;
7131 /* multiplies, extra load/stores */
7132 sh
= (insn
>> 5) & 3;
7135 rd
= (insn
>> 16) & 0xf;
7136 rn
= (insn
>> 12) & 0xf;
7137 rs
= (insn
>> 8) & 0xf;
7139 op1
= (insn
>> 20) & 0xf;
7141 case 0: case 1: case 2: case 3: case 6:
7143 tmp
= load_reg(s
, rs
);
7144 tmp2
= load_reg(s
, rm
);
7145 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7146 tcg_temp_free_i32(tmp2
);
7147 if (insn
& (1 << 22)) {
7148 /* Subtract (mls) */
7150 tmp2
= load_reg(s
, rn
);
7151 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7152 tcg_temp_free_i32(tmp2
);
7153 } else if (insn
& (1 << 21)) {
7155 tmp2
= load_reg(s
, rn
);
7156 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7157 tcg_temp_free_i32(tmp2
);
7159 if (insn
& (1 << 20))
7161 store_reg(s
, rd
, tmp
);
7164 /* 64 bit mul double accumulate (UMAAL) */
7166 tmp
= load_reg(s
, rs
);
7167 tmp2
= load_reg(s
, rm
);
7168 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7169 gen_addq_lo(s
, tmp64
, rn
);
7170 gen_addq_lo(s
, tmp64
, rd
);
7171 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7172 tcg_temp_free_i64(tmp64
);
7174 case 8: case 9: case 10: case 11:
7175 case 12: case 13: case 14: case 15:
7176 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7177 tmp
= load_reg(s
, rs
);
7178 tmp2
= load_reg(s
, rm
);
7179 if (insn
& (1 << 22)) {
7180 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7182 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7184 if (insn
& (1 << 21)) { /* mult accumulate */
7185 gen_addq(s
, tmp64
, rn
, rd
);
7187 if (insn
& (1 << 20)) {
7188 gen_logicq_cc(tmp64
);
7190 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7191 tcg_temp_free_i64(tmp64
);
7197 rn
= (insn
>> 16) & 0xf;
7198 rd
= (insn
>> 12) & 0xf;
7199 if (insn
& (1 << 23)) {
7200 /* load/store exclusive */
7201 op1
= (insn
>> 21) & 0x3;
7206 addr
= tcg_temp_local_new_i32();
7207 load_reg_var(s
, addr
, rn
);
7208 if (insn
& (1 << 20)) {
7211 gen_load_exclusive(s
, rd
, 15, addr
, 2);
7213 case 1: /* ldrexd */
7214 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
7216 case 2: /* ldrexb */
7217 gen_load_exclusive(s
, rd
, 15, addr
, 0);
7219 case 3: /* ldrexh */
7220 gen_load_exclusive(s
, rd
, 15, addr
, 1);
7229 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
7231 case 1: /* strexd */
7232 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
7234 case 2: /* strexb */
7235 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
7237 case 3: /* strexh */
7238 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
7244 tcg_temp_free(addr
);
7246 /* SWP instruction */
7249 /* ??? This is not really atomic. However we know
7250 we never have multiple CPUs running in parallel,
7251 so it is good enough. */
7252 addr
= load_reg(s
, rn
);
7253 tmp
= load_reg(s
, rm
);
7254 if (insn
& (1 << 22)) {
7255 tmp2
= gen_ld8u(addr
, IS_USER(s
));
7256 gen_st8(tmp
, addr
, IS_USER(s
));
7258 tmp2
= gen_ld32(addr
, IS_USER(s
));
7259 gen_st32(tmp
, addr
, IS_USER(s
));
7261 tcg_temp_free_i32(addr
);
7262 store_reg(s
, rd
, tmp2
);
7268 /* Misc load/store */
7269 rn
= (insn
>> 16) & 0xf;
7270 rd
= (insn
>> 12) & 0xf;
7271 addr
= load_reg(s
, rn
);
7272 if (insn
& (1 << 24))
7273 gen_add_datah_offset(s
, insn
, 0, addr
);
7275 if (insn
& (1 << 20)) {
7279 tmp
= gen_ld16u(addr
, IS_USER(s
));
7282 tmp
= gen_ld8s(addr
, IS_USER(s
));
7286 tmp
= gen_ld16s(addr
, IS_USER(s
));
7290 } else if (sh
& 2) {
7295 tmp
= load_reg(s
, rd
);
7296 gen_st32(tmp
, addr
, IS_USER(s
));
7297 tcg_gen_addi_i32(addr
, addr
, 4);
7298 tmp
= load_reg(s
, rd
+ 1);
7299 gen_st32(tmp
, addr
, IS_USER(s
));
7303 tmp
= gen_ld32(addr
, IS_USER(s
));
7304 store_reg(s
, rd
, tmp
);
7305 tcg_gen_addi_i32(addr
, addr
, 4);
7306 tmp
= gen_ld32(addr
, IS_USER(s
));
7310 address_offset
= -4;
7313 tmp
= load_reg(s
, rd
);
7314 gen_st16(tmp
, addr
, IS_USER(s
));
7317 /* Perform base writeback before the loaded value to
7318 ensure correct behavior with overlapping index registers.
7319 ldrd with base writeback is is undefined if the
7320 destination and index registers overlap. */
7321 if (!(insn
& (1 << 24))) {
7322 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7323 store_reg(s
, rn
, addr
);
7324 } else if (insn
& (1 << 21)) {
7326 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7327 store_reg(s
, rn
, addr
);
7329 tcg_temp_free_i32(addr
);
7332 /* Complete the load. */
7333 store_reg(s
, rd
, tmp
);
7342 if (insn
& (1 << 4)) {
7344 /* Armv6 Media instructions. */
7346 rn
= (insn
>> 16) & 0xf;
7347 rd
= (insn
>> 12) & 0xf;
7348 rs
= (insn
>> 8) & 0xf;
7349 switch ((insn
>> 23) & 3) {
7350 case 0: /* Parallel add/subtract. */
7351 op1
= (insn
>> 20) & 7;
7352 tmp
= load_reg(s
, rn
);
7353 tmp2
= load_reg(s
, rm
);
7354 sh
= (insn
>> 5) & 7;
7355 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7357 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7358 tcg_temp_free_i32(tmp2
);
7359 store_reg(s
, rd
, tmp
);
7362 if ((insn
& 0x00700020) == 0) {
7363 /* Halfword pack. */
7364 tmp
= load_reg(s
, rn
);
7365 tmp2
= load_reg(s
, rm
);
7366 shift
= (insn
>> 7) & 0x1f;
7367 if (insn
& (1 << 6)) {
7371 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7372 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7373 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7377 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7378 tcg_gen_ext16u_i32(tmp
, tmp
);
7379 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7381 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7382 tcg_temp_free_i32(tmp2
);
7383 store_reg(s
, rd
, tmp
);
7384 } else if ((insn
& 0x00200020) == 0x00200000) {
7386 tmp
= load_reg(s
, rm
);
7387 shift
= (insn
>> 7) & 0x1f;
7388 if (insn
& (1 << 6)) {
7391 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7393 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7395 sh
= (insn
>> 16) & 0x1f;
7396 tmp2
= tcg_const_i32(sh
);
7397 if (insn
& (1 << 22))
7398 gen_helper_usat(tmp
, tmp
, tmp2
);
7400 gen_helper_ssat(tmp
, tmp
, tmp2
);
7401 tcg_temp_free_i32(tmp2
);
7402 store_reg(s
, rd
, tmp
);
7403 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7405 tmp
= load_reg(s
, rm
);
7406 sh
= (insn
>> 16) & 0x1f;
7407 tmp2
= tcg_const_i32(sh
);
7408 if (insn
& (1 << 22))
7409 gen_helper_usat16(tmp
, tmp
, tmp2
);
7411 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7412 tcg_temp_free_i32(tmp2
);
7413 store_reg(s
, rd
, tmp
);
7414 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7416 tmp
= load_reg(s
, rn
);
7417 tmp2
= load_reg(s
, rm
);
7418 tmp3
= tcg_temp_new_i32();
7419 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
7420 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7421 tcg_temp_free_i32(tmp3
);
7422 tcg_temp_free_i32(tmp2
);
7423 store_reg(s
, rd
, tmp
);
7424 } else if ((insn
& 0x000003e0) == 0x00000060) {
7425 tmp
= load_reg(s
, rm
);
7426 shift
= (insn
>> 10) & 3;
7427 /* ??? In many cases it's not necessary to do a
7428 rotate, a shift is sufficient. */
7430 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7431 op1
= (insn
>> 20) & 7;
7433 case 0: gen_sxtb16(tmp
); break;
7434 case 2: gen_sxtb(tmp
); break;
7435 case 3: gen_sxth(tmp
); break;
7436 case 4: gen_uxtb16(tmp
); break;
7437 case 6: gen_uxtb(tmp
); break;
7438 case 7: gen_uxth(tmp
); break;
7439 default: goto illegal_op
;
7442 tmp2
= load_reg(s
, rn
);
7443 if ((op1
& 3) == 0) {
7444 gen_add16(tmp
, tmp2
);
7446 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7447 tcg_temp_free_i32(tmp2
);
7450 store_reg(s
, rd
, tmp
);
7451 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7453 tmp
= load_reg(s
, rm
);
7454 if (insn
& (1 << 22)) {
7455 if (insn
& (1 << 7)) {
7459 gen_helper_rbit(tmp
, tmp
);
7462 if (insn
& (1 << 7))
7465 tcg_gen_bswap32_i32(tmp
, tmp
);
7467 store_reg(s
, rd
, tmp
);
7472 case 2: /* Multiplies (Type 3). */
7473 switch ((insn
>> 20) & 0x7) {
7475 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
7476 /* op2 not 00x or 11x : UNDEF */
7479 /* Signed multiply most significant [accumulate].
7480 (SMMUL, SMMLA, SMMLS) */
7481 tmp
= load_reg(s
, rm
);
7482 tmp2
= load_reg(s
, rs
);
7483 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7486 tmp
= load_reg(s
, rd
);
7487 if (insn
& (1 << 6)) {
7488 tmp64
= gen_subq_msw(tmp64
, tmp
);
7490 tmp64
= gen_addq_msw(tmp64
, tmp
);
7493 if (insn
& (1 << 5)) {
7494 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7496 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7497 tmp
= tcg_temp_new_i32();
7498 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7499 tcg_temp_free_i64(tmp64
);
7500 store_reg(s
, rn
, tmp
);
7504 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7505 if (insn
& (1 << 7)) {
7508 tmp
= load_reg(s
, rm
);
7509 tmp2
= load_reg(s
, rs
);
7510 if (insn
& (1 << 5))
7511 gen_swap_half(tmp2
);
7512 gen_smul_dual(tmp
, tmp2
);
7513 if (insn
& (1 << 6)) {
7514 /* This subtraction cannot overflow. */
7515 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7517 /* This addition cannot overflow 32 bits;
7518 * however it may overflow considered as a signed
7519 * operation, in which case we must set the Q flag.
7521 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7523 tcg_temp_free_i32(tmp2
);
7524 if (insn
& (1 << 22)) {
7525 /* smlald, smlsld */
7526 tmp64
= tcg_temp_new_i64();
7527 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7528 tcg_temp_free_i32(tmp
);
7529 gen_addq(s
, tmp64
, rd
, rn
);
7530 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7531 tcg_temp_free_i64(tmp64
);
7533 /* smuad, smusd, smlad, smlsd */
7536 tmp2
= load_reg(s
, rd
);
7537 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7538 tcg_temp_free_i32(tmp2
);
7540 store_reg(s
, rn
, tmp
);
7546 if (!arm_feature(env
, ARM_FEATURE_ARM_DIV
)) {
7549 if (((insn
>> 5) & 7) || (rd
!= 15)) {
7552 tmp
= load_reg(s
, rm
);
7553 tmp2
= load_reg(s
, rs
);
7554 if (insn
& (1 << 21)) {
7555 gen_helper_udiv(tmp
, tmp
, tmp2
);
7557 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7559 tcg_temp_free_i32(tmp2
);
7560 store_reg(s
, rn
, tmp
);
7567 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7569 case 0: /* Unsigned sum of absolute differences. */
7571 tmp
= load_reg(s
, rm
);
7572 tmp2
= load_reg(s
, rs
);
7573 gen_helper_usad8(tmp
, tmp
, tmp2
);
7574 tcg_temp_free_i32(tmp2
);
7576 tmp2
= load_reg(s
, rd
);
7577 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7578 tcg_temp_free_i32(tmp2
);
7580 store_reg(s
, rn
, tmp
);
7582 case 0x20: case 0x24: case 0x28: case 0x2c:
7583 /* Bitfield insert/clear. */
7585 shift
= (insn
>> 7) & 0x1f;
7586 i
= (insn
>> 16) & 0x1f;
7589 tmp
= tcg_temp_new_i32();
7590 tcg_gen_movi_i32(tmp
, 0);
7592 tmp
= load_reg(s
, rm
);
7595 tmp2
= load_reg(s
, rd
);
7596 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7597 tcg_temp_free_i32(tmp2
);
7599 store_reg(s
, rd
, tmp
);
7601 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7602 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7604 tmp
= load_reg(s
, rm
);
7605 shift
= (insn
>> 7) & 0x1f;
7606 i
= ((insn
>> 16) & 0x1f) + 1;
7611 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7613 gen_sbfx(tmp
, shift
, i
);
7616 store_reg(s
, rd
, tmp
);
7626 /* Check for undefined extension instructions
7627 * per the ARM Bible IE:
7628 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7630 sh
= (0xf << 20) | (0xf << 4);
7631 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7635 /* load/store byte/word */
7636 rn
= (insn
>> 16) & 0xf;
7637 rd
= (insn
>> 12) & 0xf;
7638 tmp2
= load_reg(s
, rn
);
7639 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7640 if (insn
& (1 << 24))
7641 gen_add_data_offset(s
, insn
, tmp2
);
7642 if (insn
& (1 << 20)) {
7644 if (insn
& (1 << 22)) {
7645 tmp
= gen_ld8u(tmp2
, i
);
7647 tmp
= gen_ld32(tmp2
, i
);
7651 tmp
= load_reg(s
, rd
);
7652 if (insn
& (1 << 22))
7653 gen_st8(tmp
, tmp2
, i
);
7655 gen_st32(tmp
, tmp2
, i
);
7657 if (!(insn
& (1 << 24))) {
7658 gen_add_data_offset(s
, insn
, tmp2
);
7659 store_reg(s
, rn
, tmp2
);
7660 } else if (insn
& (1 << 21)) {
7661 store_reg(s
, rn
, tmp2
);
7663 tcg_temp_free_i32(tmp2
);
7665 if (insn
& (1 << 20)) {
7666 /* Complete the load. */
7667 store_reg_from_load(env
, s
, rd
, tmp
);
7673 int j
, n
, user
, loaded_base
;
7675 /* load/store multiple words */
7676 /* XXX: store correct base if write back */
7678 if (insn
& (1 << 22)) {
7680 goto illegal_op
; /* only usable in supervisor mode */
7682 if ((insn
& (1 << 15)) == 0)
7685 rn
= (insn
>> 16) & 0xf;
7686 addr
= load_reg(s
, rn
);
7688 /* compute total size */
7690 TCGV_UNUSED(loaded_var
);
7693 if (insn
& (1 << i
))
7696 /* XXX: test invalid n == 0 case ? */
7697 if (insn
& (1 << 23)) {
7698 if (insn
& (1 << 24)) {
7700 tcg_gen_addi_i32(addr
, addr
, 4);
7702 /* post increment */
7705 if (insn
& (1 << 24)) {
7707 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7709 /* post decrement */
7711 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7716 if (insn
& (1 << i
)) {
7717 if (insn
& (1 << 20)) {
7719 tmp
= gen_ld32(addr
, IS_USER(s
));
7721 tmp2
= tcg_const_i32(i
);
7722 gen_helper_set_user_reg(tmp2
, tmp
);
7723 tcg_temp_free_i32(tmp2
);
7724 tcg_temp_free_i32(tmp
);
7725 } else if (i
== rn
) {
7729 store_reg_from_load(env
, s
, i
, tmp
);
7734 /* special case: r15 = PC + 8 */
7735 val
= (long)s
->pc
+ 4;
7736 tmp
= tcg_temp_new_i32();
7737 tcg_gen_movi_i32(tmp
, val
);
7739 tmp
= tcg_temp_new_i32();
7740 tmp2
= tcg_const_i32(i
);
7741 gen_helper_get_user_reg(tmp
, tmp2
);
7742 tcg_temp_free_i32(tmp2
);
7744 tmp
= load_reg(s
, i
);
7746 gen_st32(tmp
, addr
, IS_USER(s
));
7749 /* no need to add after the last transfer */
7751 tcg_gen_addi_i32(addr
, addr
, 4);
7754 if (insn
& (1 << 21)) {
7756 if (insn
& (1 << 23)) {
7757 if (insn
& (1 << 24)) {
7760 /* post increment */
7761 tcg_gen_addi_i32(addr
, addr
, 4);
7764 if (insn
& (1 << 24)) {
7767 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7769 /* post decrement */
7770 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7773 store_reg(s
, rn
, addr
);
7775 tcg_temp_free_i32(addr
);
7778 store_reg(s
, rn
, loaded_var
);
7780 if ((insn
& (1 << 22)) && !user
) {
7781 /* Restore CPSR from SPSR. */
7782 tmp
= load_cpu_field(spsr
);
7783 gen_set_cpsr(tmp
, 0xffffffff);
7784 tcg_temp_free_i32(tmp
);
7785 s
->is_jmp
= DISAS_UPDATE
;
7794 /* branch (and link) */
7795 val
= (int32_t)s
->pc
;
7796 if (insn
& (1 << 24)) {
7797 tmp
= tcg_temp_new_i32();
7798 tcg_gen_movi_i32(tmp
, val
);
7799 store_reg(s
, 14, tmp
);
7801 offset
= (((int32_t)insn
<< 8) >> 8);
7802 val
+= (offset
<< 2) + 4;
7810 if (disas_coproc_insn(env
, s
, insn
))
7815 gen_set_pc_im(s
->pc
);
7816 s
->is_jmp
= DISAS_SWI
;
7820 gen_exception_insn(s
, 4, EXCP_UDEF
);
7826 /* Return true if this is a Thumb-2 logical op. */
7828 thumb2_logic_op(int op
)
7833 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7834 then set condition code flags based on the result of the operation.
7835 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7836 to the high bit of T1.
7837 Returns zero if the opcode is valid. */
7840 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7847 tcg_gen_and_i32(t0
, t0
, t1
);
7851 tcg_gen_andc_i32(t0
, t0
, t1
);
7855 tcg_gen_or_i32(t0
, t0
, t1
);
7859 tcg_gen_orc_i32(t0
, t0
, t1
);
7863 tcg_gen_xor_i32(t0
, t0
, t1
);
7868 gen_helper_add_cc(t0
, t0
, t1
);
7870 tcg_gen_add_i32(t0
, t0
, t1
);
7874 gen_helper_adc_cc(t0
, t0
, t1
);
7880 gen_helper_sbc_cc(t0
, t0
, t1
);
7882 gen_sub_carry(t0
, t0
, t1
);
7886 gen_helper_sub_cc(t0
, t0
, t1
);
7888 tcg_gen_sub_i32(t0
, t0
, t1
);
7892 gen_helper_sub_cc(t0
, t1
, t0
);
7894 tcg_gen_sub_i32(t0
, t1
, t0
);
7896 default: /* 5, 6, 7, 9, 12, 15. */
7902 gen_set_CF_bit31(t1
);
7907 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7909 static int disas_thumb2_insn(CPUARMState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7911 uint32_t insn
, imm
, shift
, offset
;
7912 uint32_t rd
, rn
, rm
, rs
;
7923 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7924 || arm_feature (env
, ARM_FEATURE_M
))) {
7925 /* Thumb-1 cores may need to treat bl and blx as a pair of
7926 16-bit instructions to get correct prefetch abort behavior. */
7928 if ((insn
& (1 << 12)) == 0) {
7930 /* Second half of blx. */
7931 offset
= ((insn
& 0x7ff) << 1);
7932 tmp
= load_reg(s
, 14);
7933 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7934 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7936 tmp2
= tcg_temp_new_i32();
7937 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7938 store_reg(s
, 14, tmp2
);
7942 if (insn
& (1 << 11)) {
7943 /* Second half of bl. */
7944 offset
= ((insn
& 0x7ff) << 1) | 1;
7945 tmp
= load_reg(s
, 14);
7946 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7948 tmp2
= tcg_temp_new_i32();
7949 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7950 store_reg(s
, 14, tmp2
);
7954 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7955 /* Instruction spans a page boundary. Implement it as two
7956 16-bit instructions in case the second half causes an
7958 offset
= ((int32_t)insn
<< 21) >> 9;
7959 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7962 /* Fall through to 32-bit decode. */
7965 insn
= arm_lduw_code(s
->pc
, s
->bswap_code
);
7967 insn
|= (uint32_t)insn_hw1
<< 16;
7969 if ((insn
& 0xf800e800) != 0xf000e800) {
7973 rn
= (insn
>> 16) & 0xf;
7974 rs
= (insn
>> 12) & 0xf;
7975 rd
= (insn
>> 8) & 0xf;
7977 switch ((insn
>> 25) & 0xf) {
7978 case 0: case 1: case 2: case 3:
7979 /* 16-bit instructions. Should never happen. */
7982 if (insn
& (1 << 22)) {
7983 /* Other load/store, table branch. */
7984 if (insn
& 0x01200000) {
7985 /* Load/store doubleword. */
7987 addr
= tcg_temp_new_i32();
7988 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7990 addr
= load_reg(s
, rn
);
7992 offset
= (insn
& 0xff) * 4;
7993 if ((insn
& (1 << 23)) == 0)
7995 if (insn
& (1 << 24)) {
7996 tcg_gen_addi_i32(addr
, addr
, offset
);
7999 if (insn
& (1 << 20)) {
8001 tmp
= gen_ld32(addr
, IS_USER(s
));
8002 store_reg(s
, rs
, tmp
);
8003 tcg_gen_addi_i32(addr
, addr
, 4);
8004 tmp
= gen_ld32(addr
, IS_USER(s
));
8005 store_reg(s
, rd
, tmp
);
8008 tmp
= load_reg(s
, rs
);
8009 gen_st32(tmp
, addr
, IS_USER(s
));
8010 tcg_gen_addi_i32(addr
, addr
, 4);
8011 tmp
= load_reg(s
, rd
);
8012 gen_st32(tmp
, addr
, IS_USER(s
));
8014 if (insn
& (1 << 21)) {
8015 /* Base writeback. */
8018 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
8019 store_reg(s
, rn
, addr
);
8021 tcg_temp_free_i32(addr
);
8023 } else if ((insn
& (1 << 23)) == 0) {
8024 /* Load/store exclusive word. */
8025 addr
= tcg_temp_local_new();
8026 load_reg_var(s
, addr
, rn
);
8027 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
8028 if (insn
& (1 << 20)) {
8029 gen_load_exclusive(s
, rs
, 15, addr
, 2);
8031 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
8033 tcg_temp_free(addr
);
8034 } else if ((insn
& (1 << 6)) == 0) {
8037 addr
= tcg_temp_new_i32();
8038 tcg_gen_movi_i32(addr
, s
->pc
);
8040 addr
= load_reg(s
, rn
);
8042 tmp
= load_reg(s
, rm
);
8043 tcg_gen_add_i32(addr
, addr
, tmp
);
8044 if (insn
& (1 << 4)) {
8046 tcg_gen_add_i32(addr
, addr
, tmp
);
8047 tcg_temp_free_i32(tmp
);
8048 tmp
= gen_ld16u(addr
, IS_USER(s
));
8050 tcg_temp_free_i32(tmp
);
8051 tmp
= gen_ld8u(addr
, IS_USER(s
));
8053 tcg_temp_free_i32(addr
);
8054 tcg_gen_shli_i32(tmp
, tmp
, 1);
8055 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
8056 store_reg(s
, 15, tmp
);
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    op = (insn & 0x1f);
                    addr = tcg_temp_new_i32();
                    tmp = tcg_const_i32(op);
                    gen_helper_get_r13_banked(addr, cpu_env, tmp);
                    tcg_temp_free_i32(tmp);
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        tmp = tcg_const_i32(op);
                        gen_helper_set_r13_banked(cpu_env, tmp, addr);
                        tcg_temp_free_i32(tmp);
                    } else {
                        tcg_temp_free_i32(addr);
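                        /* srs stores LR and the CPSR to the stack of
                           the mode encoded in the insn's low bits (op),
                           hence the banked-r13 helpers rather than the
                           current SP; rfe above is its counterpart,
                           popping PC and CPSR.  */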
                    }
                }
            } else {
                int i, loaded_base = 0;
                TCGv loaded_var;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                TCGV_UNUSED(loaded_var);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:
        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);

                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
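                /* Example (not from the source): a BL whose S, J1, J2,
                   imm10 and imm11 fields are all ones decodes to
                   offset = -2, i.e. a branch to s->pc - 2; clearing J1
                   or J2 flips offset bit 23 or 22 via the two XORs
                   above.  */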
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                              op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
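                    /* Example (illustrative): "sbfx r0, r1, #8, #4"
                       encodes shift = 8 and imm = 3; imm is incremented
                       to the field width (4) above, so gen_sbfx()
                       sign-extends bits [11:8] of r1 into r0.  */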
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
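                    /* Worked example of the expansion above
                       (illustrative): shift = 3 with imm8 = 0x55 yields
                       0x55555555 by the XYXYXYXY rule; a rotated
                       constant with rotation 8 and imm8 = 0x1f becomes
                       0x9f000000 once the implied bit 7 is set, with
                       shifter_out = 1 so the shifter carry-out is
                       significant.  */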
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
    {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        break;
    }
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(s->pc, s->bswap_code);
    s->pc += 2;
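    /* Each insn inside an IT block is translated behind a conditional
       branch to condlabel; the IT state itself is advanced by
       gen_intermediate_code_internal() after this function returns, so
       nothing here mutates condexec_mask except the IT insn below.  */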
    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
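            /* Example (illustrative): "IT EQ" has firstcond = 0 and
               mask = 0b1000, so condexec_mask becomes 0x08 here.  The
               per-insn advance in gen_intermediate_code_internal()
               shifts once for the IT insn itself (mask becomes 0x10)
               and once more after the single conditional insn,
               emptying the mask and ending the block.  */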
            break;

        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (IS_M(env)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv loaded_var;
        TCGV_UNUSED(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
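                    /* The least-significant set bit written by the IT
                       insn acts as a sentinel: once it has been shifted
                       out, the mask reaches zero here and translation
                       leaves the IT block.  */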
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}