 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
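
/* ARCH() is used at the top of decoder arms to reject encodings that only
 * exist from a given architecture revision, e.g. ARCH(6T2) before decoding a
 * Thumb-2-only instruction; on older cores it jumps to the illegal_op path
 * of the enclosing decode function.  */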
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    uint32_t condexec_mask;
    uint32_t condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
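
/* Used throughout this file, e.g. load_cpu_field(CF) reads the carry flag
   into a fresh temporary and store_cpu_field(var, thumb) writes (and frees)
   a temporary into the Thumb state field; the field name is resolved to a
   byte offset into CPUARMState via offsetof().  */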
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }

static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q)
        break;
    case 3:
        PAS_OP(sh)
        break;
    case 6:
        PAS_OP(uq)
        break;
    case 7:
        PAS_OP(uh)
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }

static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q)
        break;
    case 2:
        PAS_OP(sh)
        break;
    case 5:
        PAS_OP(uq)
        break;
    case 6:
        PAS_OP(uh)
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
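
/* The branch conditions above rely on how the flags are stored in
   CPUARMState: CF holds 0 or 1, NF and VF hold the flag in bit 31 (hence
   the signed LT/GE-against-zero tests), and ZF holds a value that is zero
   exactly when the Z flag is set.  */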
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
840 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
843 int val
, rm
, shift
, shiftop
;
846 if (!(insn
& (1 << 25))) {
849 if (!(insn
& (1 << 23)))
852 tcg_gen_addi_i32(var
, var
, val
);
856 shift
= (insn
>> 7) & 0x1f;
857 shiftop
= (insn
>> 5) & 3;
858 offset
= load_reg(s
, rm
);
859 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
860 if (!(insn
& (1 << 23)))
861 tcg_gen_sub_i32(var
, var
, offset
);
863 tcg_gen_add_i32(var
, var
, offset
);
864 tcg_temp_free_i32(offset
);
868 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
874 if (insn
& (1 << 22)) {
876 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
877 if (!(insn
& (1 << 23)))
881 tcg_gen_addi_i32(var
, var
, val
);
885 tcg_gen_addi_i32(var
, var
, extra
);
887 offset
= load_reg(s
, rm
);
888 if (!(insn
& (1 << 23)))
889 tcg_gen_sub_i32(var
, var
, offset
);
891 tcg_gen_add_i32(var
, var
, offset
);
892 tcg_temp_free_i32(offset
);
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
909 #define VFP_OP2(name) \
910 static inline void gen_vfp_##name(int dp) \
912 TCGv_ptr fpst = get_fpstatus_ptr(0); \
914 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
916 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
918 tcg_temp_free_ptr(fpst); \
928 static inline void gen_vfp_F1_mul(int dp
)
930 /* Like gen_vfp_mul() but put result in F1 */
931 TCGv_ptr fpst
= get_fpstatus_ptr(0);
933 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
935 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
937 tcg_temp_free_ptr(fpst
);
940 static inline void gen_vfp_F1_neg(int dp
)
942 /* Like gen_vfp_neg() but put result in F1 */
944 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
946 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
950 static inline void gen_vfp_abs(int dp
)
953 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
955 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
958 static inline void gen_vfp_neg(int dp
)
961 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
963 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
966 static inline void gen_vfp_sqrt(int dp
)
969 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
971 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
974 static inline void gen_vfp_cmp(int dp
)
977 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
979 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
982 static inline void gen_vfp_cmpe(int dp
)
985 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
987 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
990 static inline void gen_vfp_F1_ld0(int dp
)
993 tcg_gen_movi_i64(cpu_F1d
, 0);
995 tcg_gen_movi_i32(cpu_F1s
, 0);
998 #define VFP_GEN_ITOF(name) \
999 static inline void gen_vfp_##name(int dp, int neon) \
1001 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1003 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1005 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1007 tcg_temp_free_ptr(statusptr); \
1014 #define VFP_GEN_FTOI(name) \
1015 static inline void gen_vfp_##name(int dp, int neon) \
1017 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1019 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1021 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1023 tcg_temp_free_ptr(statusptr); \
1032 #define VFP_GEN_FIX(name) \
1033 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1035 TCGv tmp_shift = tcg_const_i32(shift); \
1036 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1038 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1040 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1042 tcg_temp_free_i32(tmp_shift); \
1043 tcg_temp_free_ptr(statusptr); \
1055 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv addr
)
1058 tcg_gen_qemu_ld64(cpu_F0d
, addr
, IS_USER(s
));
1060 tcg_gen_qemu_ld32u(cpu_F0s
, addr
, IS_USER(s
));
1063 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv addr
)
1066 tcg_gen_qemu_st64(cpu_F0d
, addr
, IS_USER(s
));
1068 tcg_gen_qemu_st32(cpu_F0s
, addr
, IS_USER(s
));
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)
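
/* Bit 20 of a coprocessor instruction encoding is the L (direction) bit:
   set when the instruction reads from the coprocessor into the ARM core
   (e.g. TMRRC, WLDR), clear when it writes to the coprocessor.  */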
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1172 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1174 iwmmxt_store_reg(cpu_M0
, rn
);
1177 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1179 iwmmxt_load_reg(cpu_M0
, rn
);
1182 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1184 iwmmxt_load_reg(cpu_V1
, rn
);
1185 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1188 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1190 iwmmxt_load_reg(cpu_V1
, rn
);
1191 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1194 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1196 iwmmxt_load_reg(cpu_V1
, rn
);
1197 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1200 #define IWMMXT_OP(name) \
1201 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1203 iwmmxt_load_reg(cpu_V1, rn); \
1204 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1207 #define IWMMXT_OP_ENV(name) \
1208 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1210 iwmmxt_load_reg(cpu_V1, rn); \
1211 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1214 #define IWMMXT_OP_ENV_SIZE(name) \
1215 IWMMXT_OP_ENV(name##b) \
1216 IWMMXT_OP_ENV(name##w) \
1217 IWMMXT_OP_ENV(name##l)
1219 #define IWMMXT_OP_ENV1(name) \
1220 static inline void gen_op_iwmmxt_##name##_M0(void) \
1222 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1236 IWMMXT_OP_ENV_SIZE(unpackl
)
1237 IWMMXT_OP_ENV_SIZE(unpackh
)
1239 IWMMXT_OP_ENV1(unpacklub
)
1240 IWMMXT_OP_ENV1(unpackluw
)
1241 IWMMXT_OP_ENV1(unpacklul
)
1242 IWMMXT_OP_ENV1(unpackhub
)
1243 IWMMXT_OP_ENV1(unpackhuw
)
1244 IWMMXT_OP_ENV1(unpackhul
)
1245 IWMMXT_OP_ENV1(unpacklsb
)
1246 IWMMXT_OP_ENV1(unpacklsw
)
1247 IWMMXT_OP_ENV1(unpacklsl
)
1248 IWMMXT_OP_ENV1(unpackhsb
)
1249 IWMMXT_OP_ENV1(unpackhsw
)
1250 IWMMXT_OP_ENV1(unpackhsl
)
1252 IWMMXT_OP_ENV_SIZE(cmpeq
)
1253 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1254 IWMMXT_OP_ENV_SIZE(cmpgts
)
1256 IWMMXT_OP_ENV_SIZE(mins
)
1257 IWMMXT_OP_ENV_SIZE(minu
)
1258 IWMMXT_OP_ENV_SIZE(maxs
)
1259 IWMMXT_OP_ENV_SIZE(maxu
)
1261 IWMMXT_OP_ENV_SIZE(subn
)
1262 IWMMXT_OP_ENV_SIZE(addn
)
1263 IWMMXT_OP_ENV_SIZE(subu
)
1264 IWMMXT_OP_ENV_SIZE(addu
)
1265 IWMMXT_OP_ENV_SIZE(subs
)
1266 IWMMXT_OP_ENV_SIZE(adds
)
1268 IWMMXT_OP_ENV(avgb0
)
1269 IWMMXT_OP_ENV(avgb1
)
1270 IWMMXT_OP_ENV(avgw0
)
1271 IWMMXT_OP_ENV(avgw1
)
1275 IWMMXT_OP_ENV(packuw
)
1276 IWMMXT_OP_ENV(packul
)
1277 IWMMXT_OP_ENV(packuq
)
1278 IWMMXT_OP_ENV(packsw
)
1279 IWMMXT_OP_ENV(packsl
)
1280 IWMMXT_OP_ENV(packsq
)
1282 static void gen_op_iwmmxt_set_mup(void)
1285 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1286 tcg_gen_ori_i32(tmp
, tmp
, 2);
1287 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1290 static void gen_op_iwmmxt_set_cup(void)
1293 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1294 tcg_gen_ori_i32(tmp
, tmp
, 1);
1295 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1298 static void gen_op_iwmmxt_setpsr_nz(void)
1300 TCGv tmp
= tcg_temp_new_i32();
1301 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1302 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1305 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1307 iwmmxt_load_reg(cpu_V1
, rn
);
1308 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1309 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1312 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
, TCGv dest
)
1318 rd
= (insn
>> 16) & 0xf;
1319 tmp
= load_reg(s
, rd
);
1321 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1322 if (insn
& (1 << 24)) {
1324 if (insn
& (1 << 23))
1325 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1327 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1328 tcg_gen_mov_i32(dest
, tmp
);
1329 if (insn
& (1 << 21))
1330 store_reg(s
, rd
, tmp
);
1332 tcg_temp_free_i32(tmp
);
1333 } else if (insn
& (1 << 21)) {
1335 tcg_gen_mov_i32(dest
, tmp
);
1336 if (insn
& (1 << 23))
1337 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1339 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1340 store_reg(s
, rd
, tmp
);
1341 } else if (!(insn
& (1 << 23)))
1346 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv dest
)
1348 int rd
= (insn
>> 0) & 0xf;
1351 if (insn
& (1 << 8)) {
1352 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1355 tmp
= iwmmxt_load_creg(rd
);
1358 tmp
= tcg_temp_new_i32();
1359 iwmmxt_load_reg(cpu_V0
, rd
);
1360 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
1362 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1363 tcg_gen_mov_i32(dest
, tmp
);
1364 tcg_temp_free_i32(tmp
);
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1370 static int disas_iwmmxt_insn(CPUARMState
*env
, DisasContext
*s
, uint32_t insn
)
1373 int rdhi
, rdlo
, rd0
, rd1
, i
;
1375 TCGv tmp
, tmp2
, tmp3
;
1377 if ((insn
& 0x0e000e00) == 0x0c000000) {
1378 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1380 rdlo
= (insn
>> 12) & 0xf;
1381 rdhi
= (insn
>> 16) & 0xf;
1382 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1383 iwmmxt_load_reg(cpu_V0
, wrd
);
1384 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1385 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1386 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1387 } else { /* TMCRR */
1388 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1389 iwmmxt_store_reg(cpu_V0
, wrd
);
1390 gen_op_iwmmxt_set_mup();
1395 wrd
= (insn
>> 12) & 0xf;
1396 addr
= tcg_temp_new_i32();
1397 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1398 tcg_temp_free_i32(addr
);
1401 if (insn
& ARM_CP_RW_BIT
) {
1402 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1403 tmp
= tcg_temp_new_i32();
1404 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1405 iwmmxt_store_creg(wrd
, tmp
);
1408 if (insn
& (1 << 8)) {
1409 if (insn
& (1 << 22)) { /* WLDRD */
1410 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1412 } else { /* WLDRW wRd */
1413 tmp
= gen_ld32(addr
, IS_USER(s
));
1416 if (insn
& (1 << 22)) { /* WLDRH */
1417 tmp
= gen_ld16u(addr
, IS_USER(s
));
1418 } else { /* WLDRB */
1419 tmp
= gen_ld8u(addr
, IS_USER(s
));
1423 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1424 tcg_temp_free_i32(tmp
);
1426 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1429 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1430 tmp
= iwmmxt_load_creg(wrd
);
1431 gen_st32(tmp
, addr
, IS_USER(s
));
1433 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1434 tmp
= tcg_temp_new_i32();
1435 if (insn
& (1 << 8)) {
1436 if (insn
& (1 << 22)) { /* WSTRD */
1437 tcg_temp_free_i32(tmp
);
1438 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1439 } else { /* WSTRW wRd */
1440 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1441 gen_st32(tmp
, addr
, IS_USER(s
));
1444 if (insn
& (1 << 22)) { /* WSTRH */
1445 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1446 gen_st16(tmp
, addr
, IS_USER(s
));
1447 } else { /* WSTRB */
1448 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1449 gen_st8(tmp
, addr
, IS_USER(s
));
1454 tcg_temp_free_i32(addr
);
1458 if ((insn
& 0x0f000000) != 0x0e000000)
1461 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1462 case 0x000: /* WOR */
1463 wrd
= (insn
>> 12) & 0xf;
1464 rd0
= (insn
>> 0) & 0xf;
1465 rd1
= (insn
>> 16) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1467 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1468 gen_op_iwmmxt_setpsr_nz();
1469 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1470 gen_op_iwmmxt_set_mup();
1471 gen_op_iwmmxt_set_cup();
1473 case 0x011: /* TMCR */
1476 rd
= (insn
>> 12) & 0xf;
1477 wrd
= (insn
>> 16) & 0xf;
1479 case ARM_IWMMXT_wCID
:
1480 case ARM_IWMMXT_wCASF
:
1482 case ARM_IWMMXT_wCon
:
1483 gen_op_iwmmxt_set_cup();
1485 case ARM_IWMMXT_wCSSF
:
1486 tmp
= iwmmxt_load_creg(wrd
);
1487 tmp2
= load_reg(s
, rd
);
1488 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1489 tcg_temp_free_i32(tmp2
);
1490 iwmmxt_store_creg(wrd
, tmp
);
1492 case ARM_IWMMXT_wCGR0
:
1493 case ARM_IWMMXT_wCGR1
:
1494 case ARM_IWMMXT_wCGR2
:
1495 case ARM_IWMMXT_wCGR3
:
1496 gen_op_iwmmxt_set_cup();
1497 tmp
= load_reg(s
, rd
);
1498 iwmmxt_store_creg(wrd
, tmp
);
1504 case 0x100: /* WXOR */
1505 wrd
= (insn
>> 12) & 0xf;
1506 rd0
= (insn
>> 0) & 0xf;
1507 rd1
= (insn
>> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1509 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1515 case 0x111: /* TMRC */
1518 rd
= (insn
>> 12) & 0xf;
1519 wrd
= (insn
>> 16) & 0xf;
1520 tmp
= iwmmxt_load_creg(wrd
);
1521 store_reg(s
, rd
, tmp
);
1523 case 0x300: /* WANDN */
1524 wrd
= (insn
>> 12) & 0xf;
1525 rd0
= (insn
>> 0) & 0xf;
1526 rd1
= (insn
>> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1528 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1529 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1535 case 0x200: /* WAND */
1536 wrd
= (insn
>> 12) & 0xf;
1537 rd0
= (insn
>> 0) & 0xf;
1538 rd1
= (insn
>> 16) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1540 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1541 gen_op_iwmmxt_setpsr_nz();
1542 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1546 case 0x810: case 0xa10: /* WMADD */
1547 wrd
= (insn
>> 12) & 0xf;
1548 rd0
= (insn
>> 0) & 0xf;
1549 rd1
= (insn
>> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1551 if (insn
& (1 << 21))
1552 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1554 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1555 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1556 gen_op_iwmmxt_set_mup();
1558 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1559 wrd
= (insn
>> 12) & 0xf;
1560 rd0
= (insn
>> 16) & 0xf;
1561 rd1
= (insn
>> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1563 switch ((insn
>> 22) & 3) {
1565 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1568 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1571 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1576 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1580 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1581 wrd
= (insn
>> 12) & 0xf;
1582 rd0
= (insn
>> 16) & 0xf;
1583 rd1
= (insn
>> 0) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1585 switch ((insn
>> 22) & 3) {
1587 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1590 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1593 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1598 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1599 gen_op_iwmmxt_set_mup();
1600 gen_op_iwmmxt_set_cup();
1602 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1603 wrd
= (insn
>> 12) & 0xf;
1604 rd0
= (insn
>> 16) & 0xf;
1605 rd1
= (insn
>> 0) & 0xf;
1606 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1607 if (insn
& (1 << 22))
1608 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1610 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1611 if (!(insn
& (1 << 20)))
1612 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1614 gen_op_iwmmxt_set_mup();
1616 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1617 wrd
= (insn
>> 12) & 0xf;
1618 rd0
= (insn
>> 16) & 0xf;
1619 rd1
= (insn
>> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1621 if (insn
& (1 << 21)) {
1622 if (insn
& (1 << 20))
1623 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1625 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1627 if (insn
& (1 << 20))
1628 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1630 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1633 gen_op_iwmmxt_set_mup();
1635 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1636 wrd
= (insn
>> 12) & 0xf;
1637 rd0
= (insn
>> 16) & 0xf;
1638 rd1
= (insn
>> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1640 if (insn
& (1 << 21))
1641 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1643 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1644 if (!(insn
& (1 << 20))) {
1645 iwmmxt_load_reg(cpu_V1
, wrd
);
1646 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1648 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1649 gen_op_iwmmxt_set_mup();
1651 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1652 wrd
= (insn
>> 12) & 0xf;
1653 rd0
= (insn
>> 16) & 0xf;
1654 rd1
= (insn
>> 0) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1656 switch ((insn
>> 22) & 3) {
1658 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1661 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1664 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1670 gen_op_iwmmxt_set_mup();
1671 gen_op_iwmmxt_set_cup();
1673 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1674 wrd
= (insn
>> 12) & 0xf;
1675 rd0
= (insn
>> 16) & 0xf;
1676 rd1
= (insn
>> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1678 if (insn
& (1 << 22)) {
1679 if (insn
& (1 << 20))
1680 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1682 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1684 if (insn
& (1 << 20))
1685 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1687 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1693 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1694 wrd
= (insn
>> 12) & 0xf;
1695 rd0
= (insn
>> 16) & 0xf;
1696 rd1
= (insn
>> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1698 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1699 tcg_gen_andi_i32(tmp
, tmp
, 7);
1700 iwmmxt_load_reg(cpu_V1
, rd1
);
1701 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1702 tcg_temp_free_i32(tmp
);
1703 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1704 gen_op_iwmmxt_set_mup();
1706 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1707 if (((insn
>> 6) & 3) == 3)
1709 rd
= (insn
>> 12) & 0xf;
1710 wrd
= (insn
>> 16) & 0xf;
1711 tmp
= load_reg(s
, rd
);
1712 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1713 switch ((insn
>> 6) & 3) {
1715 tmp2
= tcg_const_i32(0xff);
1716 tmp3
= tcg_const_i32((insn
& 7) << 3);
1719 tmp2
= tcg_const_i32(0xffff);
1720 tmp3
= tcg_const_i32((insn
& 3) << 4);
1723 tmp2
= tcg_const_i32(0xffffffff);
1724 tmp3
= tcg_const_i32((insn
& 1) << 5);
1730 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1731 tcg_temp_free(tmp3
);
1732 tcg_temp_free(tmp2
);
1733 tcg_temp_free_i32(tmp
);
1734 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1735 gen_op_iwmmxt_set_mup();
1737 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1738 rd
= (insn
>> 12) & 0xf;
1739 wrd
= (insn
>> 16) & 0xf;
1740 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1742 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1743 tmp
= tcg_temp_new_i32();
1744 switch ((insn
>> 22) & 3) {
1746 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1747 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1749 tcg_gen_ext8s_i32(tmp
, tmp
);
1751 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1755 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1756 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1758 tcg_gen_ext16s_i32(tmp
, tmp
);
1760 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1764 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1765 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1768 store_reg(s
, rd
, tmp
);
1770 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1771 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1773 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1774 switch ((insn
>> 22) & 3) {
1776 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1779 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1782 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1785 tcg_gen_shli_i32(tmp
, tmp
, 28);
1787 tcg_temp_free_i32(tmp
);
1789 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1790 if (((insn
>> 6) & 3) == 3)
1792 rd
= (insn
>> 12) & 0xf;
1793 wrd
= (insn
>> 16) & 0xf;
1794 tmp
= load_reg(s
, rd
);
1795 switch ((insn
>> 6) & 3) {
1797 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1800 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1803 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1806 tcg_temp_free_i32(tmp
);
1807 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1808 gen_op_iwmmxt_set_mup();
1810 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1811 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1813 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1814 tmp2
= tcg_temp_new_i32();
1815 tcg_gen_mov_i32(tmp2
, tmp
);
1816 switch ((insn
>> 22) & 3) {
1818 for (i
= 0; i
< 7; i
++) {
1819 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1820 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1824 for (i
= 0; i
< 3; i
++) {
1825 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1826 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1830 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1831 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1835 tcg_temp_free_i32(tmp2
);
1836 tcg_temp_free_i32(tmp
);
1838 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1839 wrd
= (insn
>> 12) & 0xf;
1840 rd0
= (insn
>> 16) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1842 switch ((insn
>> 22) & 3) {
1844 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1847 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1850 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1855 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1856 gen_op_iwmmxt_set_mup();
1858 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1859 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1861 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1862 tmp2
= tcg_temp_new_i32();
1863 tcg_gen_mov_i32(tmp2
, tmp
);
1864 switch ((insn
>> 22) & 3) {
1866 for (i
= 0; i
< 7; i
++) {
1867 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1868 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1872 for (i
= 0; i
< 3; i
++) {
1873 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1874 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1878 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1879 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1883 tcg_temp_free_i32(tmp2
);
1884 tcg_temp_free_i32(tmp
);
1886 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1887 rd
= (insn
>> 12) & 0xf;
1888 rd0
= (insn
>> 16) & 0xf;
1889 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1891 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1892 tmp
= tcg_temp_new_i32();
1893 switch ((insn
>> 22) & 3) {
1895 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1898 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1901 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1904 store_reg(s
, rd
, tmp
);
1906 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1907 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1908 wrd
= (insn
>> 12) & 0xf;
1909 rd0
= (insn
>> 16) & 0xf;
1910 rd1
= (insn
>> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1912 switch ((insn
>> 22) & 3) {
1914 if (insn
& (1 << 21))
1915 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1917 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1920 if (insn
& (1 << 21))
1921 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1923 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1926 if (insn
& (1 << 21))
1927 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1929 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1934 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd
= (insn
>> 12) & 0xf;
1941 rd0
= (insn
>> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1943 switch ((insn
>> 22) & 3) {
1945 if (insn
& (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1948 gen_op_iwmmxt_unpacklub_M0();
1951 if (insn
& (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1954 gen_op_iwmmxt_unpackluw_M0();
1957 if (insn
& (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1960 gen_op_iwmmxt_unpacklul_M0();
1965 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd
= (insn
>> 12) & 0xf;
1972 rd0
= (insn
>> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1974 switch ((insn
>> 22) & 3) {
1976 if (insn
& (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1979 gen_op_iwmmxt_unpackhub_M0();
1982 if (insn
& (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1985 gen_op_iwmmxt_unpackhuw_M0();
1988 if (insn
& (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1991 gen_op_iwmmxt_unpackhul_M0();
1996 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
2002 if (((insn
>> 22) & 3) == 0)
2004 wrd
= (insn
>> 12) & 0xf;
2005 rd0
= (insn
>> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2007 tmp
= tcg_temp_new_i32();
2008 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2009 tcg_temp_free_i32(tmp
);
2012 switch ((insn
>> 22) & 3) {
2014 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2017 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2020 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2023 tcg_temp_free_i32(tmp
);
2024 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
2030 if (((insn
>> 22) & 3) == 0)
2032 wrd
= (insn
>> 12) & 0xf;
2033 rd0
= (insn
>> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2035 tmp
= tcg_temp_new_i32();
2036 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2037 tcg_temp_free_i32(tmp
);
2040 switch ((insn
>> 22) & 3) {
2042 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2045 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2048 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2051 tcg_temp_free_i32(tmp
);
2052 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
2058 if (((insn
>> 22) & 3) == 0)
2060 wrd
= (insn
>> 12) & 0xf;
2061 rd0
= (insn
>> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2063 tmp
= tcg_temp_new_i32();
2064 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2065 tcg_temp_free_i32(tmp
);
2068 switch ((insn
>> 22) & 3) {
2070 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2073 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2076 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2079 tcg_temp_free_i32(tmp
);
2080 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
2086 if (((insn
>> 22) & 3) == 0)
2088 wrd
= (insn
>> 12) & 0xf;
2089 rd0
= (insn
>> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2091 tmp
= tcg_temp_new_i32();
2092 switch ((insn
>> 22) & 3) {
2094 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2095 tcg_temp_free_i32(tmp
);
2098 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2101 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2102 tcg_temp_free_i32(tmp
);
2105 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2108 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2109 tcg_temp_free_i32(tmp
);
2112 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2115 tcg_temp_free_i32(tmp
);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd
= (insn
>> 12) & 0xf;
2123 rd0
= (insn
>> 16) & 0xf;
2124 rd1
= (insn
>> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2126 switch ((insn
>> 22) & 3) {
2128 if (insn
& (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2131 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2134 if (insn
& (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2140 if (insn
& (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2143 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2148 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2149 gen_op_iwmmxt_set_mup();
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd
= (insn
>> 12) & 0xf;
2154 rd0
= (insn
>> 16) & 0xf;
2155 rd1
= (insn
>> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2157 switch ((insn
>> 22) & 3) {
2159 if (insn
& (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2165 if (insn
& (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2171 if (insn
& (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2179 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2180 gen_op_iwmmxt_set_mup();
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd
= (insn
>> 12) & 0xf;
2185 rd0
= (insn
>> 16) & 0xf;
2186 rd1
= (insn
>> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2188 tmp
= tcg_const_i32((insn
>> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1
, rd1
);
2190 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2193 gen_op_iwmmxt_set_mup();
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd
= (insn
>> 12) & 0xf;
2200 rd0
= (insn
>> 16) & 0xf;
2201 rd1
= (insn
>> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2203 switch ((insn
>> 20) & 0xf) {
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2208 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2226 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2234 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd
= (insn
>> 12) & 0xf;
2243 rd0
= (insn
>> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2245 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2246 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd
= (insn
>> 12) & 0xf;
2257 rd0
= (insn
>> 16) & 0xf;
2258 rd1
= (insn
>> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2260 switch ((insn
>> 20) & 0xf) {
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2265 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2283 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2299 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2301 wrd
= (insn
>> 12) & 0xf;
2302 rd0
= (insn
>> 16) & 0xf;
2303 rd1
= (insn
>> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2305 switch ((insn
>> 22) & 3) {
2307 if (insn
& (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2313 if (insn
& (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2316 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2319 if (insn
& (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2325 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd
= (insn
>> 5) & 0xf;
2334 rd0
= (insn
>> 12) & 0xf;
2335 rd1
= (insn
>> 0) & 0xf;
2336 if (rd0
== 0xf || rd1
== 0xf)
2338 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2339 tmp
= load_reg(s
, rd0
);
2340 tmp2
= load_reg(s
, rd1
);
2341 switch ((insn
>> 16) & 0xf) {
2342 case 0x0: /* TMIA */
2343 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2345 case 0x8: /* TMIAPH */
2346 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2349 if (insn
& (1 << 16))
2350 tcg_gen_shri_i32(tmp
, tmp
, 16);
2351 if (insn
& (1 << 17))
2352 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2356 tcg_temp_free_i32(tmp2
);
2357 tcg_temp_free_i32(tmp
);
2360 tcg_temp_free_i32(tmp2
);
2361 tcg_temp_free_i32(tmp
);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2363 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
2374 static int disas_dsp_insn(CPUARMState
*env
, DisasContext
*s
, uint32_t insn
)
2376 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2379 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0
= (insn
>> 12) & 0xf;
2383 acc
= (insn
>> 5) & 7;
2388 tmp
= load_reg(s
, rd0
);
2389 tmp2
= load_reg(s
, rd1
);
2390 switch ((insn
>> 16) & 0xf) {
2392 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2394 case 0x8: /* MIAPH */
2395 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
2401 if (insn
& (1 << 16))
2402 tcg_gen_shri_i32(tmp
, tmp
, 16);
2403 if (insn
& (1 << 17))
2404 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2410 tcg_temp_free_i32(tmp2
);
2411 tcg_temp_free_i32(tmp
);
2413 gen_op_iwmmxt_movq_wRn_M0(acc
);
2417 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi
= (insn
>> 16) & 0xf;
2420 rdlo
= (insn
>> 12) & 0xf;
2426 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2427 iwmmxt_load_reg(cpu_V0
, acc
);
2428 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2429 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2431 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2433 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2434 iwmmxt_store_reg(cpu_V0
, acc
);
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
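/* Worked example (illustrative): VFP_SREG_D() combines the four-bit field in
 * insn[15:12] with the extra low bit in insn[22], so bits [15:12] = 0x3 with
 * bit 22 set names S7.  VFP_DREG_D() instead treats insn[22] as bit 4 of the
 * D-register number on VFP3 cores (allowing D16..D31) and UNDEFs if that bit
 * is set on pre-VFP3 cores.
 */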
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
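/* Illustrative: with var = 0x000000ab and shift = 0, gen_neon_dup_u8() leaves
 * var = 0xabababab; gen_neon_dup_low16() turns 0x1234abcd into 0xabcdabcd and
 * gen_neon_dup_high16() turns it into 0x12341234.
 */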
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
2561 VFP_DREG_N(rn
, insn
);
2564 if (insn
& 0x00c00060
2565 && !arm_feature(env
, ARM_FEATURE_NEON
))
2568 pass
= (insn
>> 21) & 1;
2569 if (insn
& (1 << 22)) {
2571 offset
= ((insn
>> 5) & 3) * 8;
2572 } else if (insn
& (1 << 5)) {
2574 offset
= (insn
& (1 << 6)) ? 16 : 0;
2579 if (insn
& ARM_CP_RW_BIT
) {
2581 tmp
= neon_load_reg(rn
, pass
);
2585 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2586 if (insn
& (1 << 23))
2592 if (insn
& (1 << 23)) {
2594 tcg_gen_shri_i32(tmp
, tmp
, 16);
2600 tcg_gen_sari_i32(tmp
, tmp
, 16);
2609 store_reg(s
, rd
, tmp
);
2612 tmp
= load_reg(s
, rd
);
2613 if (insn
& (1 << 23)) {
2616 gen_neon_dup_u8(tmp
, 0);
2617 } else if (size
== 1) {
2618 gen_neon_dup_low16(tmp
);
2620 for (n
= 0; n
<= pass
* 2; n
++) {
2621 tmp2
= tcg_temp_new_i32();
2622 tcg_gen_mov_i32(tmp2
, tmp
);
2623 neon_store_reg(rn
, n
, tmp2
);
2625 neon_store_reg(rn
, n
, tmp
);
2630 tmp2
= neon_load_reg(rn
, pass
);
2631 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2632 tcg_temp_free_i32(tmp2
);
2635 tmp2
= neon_load_reg(rn
, pass
);
2636 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2637 tcg_temp_free_i32(tmp2
);
2642 neon_store_reg(rn
, pass
, tmp
);
2646 if ((insn
& 0x6f) != 0x00)
2648 rn
= VFP_SREG_N(insn
);
2649 if (insn
& ARM_CP_RW_BIT
) {
2651 if (insn
& (1 << 21)) {
2652 /* system register */
2657 /* VFP2 allows access to FSID from userspace.
2658 VFP3 restricts all id registers to privileged
2661 && arm_feature(env
, ARM_FEATURE_VFP3
))
2663 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2668 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2670 case ARM_VFP_FPINST
:
2671 case ARM_VFP_FPINST2
:
2672 /* Not present in VFP3. */
2674 || arm_feature(env
, ARM_FEATURE_VFP3
))
2676 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2680 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2681 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2683 tmp
= tcg_temp_new_i32();
2684 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2690 || !arm_feature(env
, ARM_FEATURE_MVFR
))
2692 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2698 gen_mov_F0_vreg(0, rn
);
2699 tmp
= gen_vfp_mrs();
2702 /* Set the 4 flag bits in the CPSR. */
2704 tcg_temp_free_i32(tmp
);
2706 store_reg(s
, rd
, tmp
);
2710 tmp
= load_reg(s
, rd
);
2711 if (insn
& (1 << 21)) {
2713 /* system register */
2718 /* Writes are ignored. */
2721 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2722 tcg_temp_free_i32(tmp
);
2728 /* TODO: VFP subarchitecture support.
2729 * For now, keep the EN bit only */
2730 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2731 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2734 case ARM_VFP_FPINST
:
2735 case ARM_VFP_FPINST2
:
2736 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2743 gen_mov_vreg_F0(0, rn
);
2748 /* data processing */
2749 /* The opcode is in bits 23, 21, 20 and 6. */
2750 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2754 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2756 /* rn is register number */
2757 VFP_DREG_N(rn
, insn
);
2760 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2761 /* Integer or single precision destination. */
2762 rd
= VFP_SREG_D(insn
);
2764 VFP_DREG_D(rd
, insn
);
2767 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2768 /* VCVT from int is always from S reg regardless of dp bit.
2769 * VCVT with immediate frac_bits has same format as SREG_M
2771 rm
= VFP_SREG_M(insn
);
2773 VFP_DREG_M(rm
, insn
);
2776 rn
= VFP_SREG_N(insn
);
2777 if (op
== 15 && rn
== 15) {
2778 /* Double precision destination. */
2779 VFP_DREG_D(rd
, insn
);
2781 rd
= VFP_SREG_D(insn
);
2783 /* NB that we implicitly rely on the encoding for the frac_bits
2784 * in VCVT of fixed to float being the same as that of an SREG_M
2786 rm
= VFP_SREG_M(insn
);
2789 veclen
= s
->vec_len
;
2790 if (op
== 15 && rn
> 3)
2793 /* Shut up compiler warnings. */
2804 /* Figure out what type of vector operation this is. */
2805 if ((rd
& bank_mask
) == 0) {
2810 delta_d
= (s
->vec_stride
>> 1) + 1;
2812 delta_d
= s
->vec_stride
+ 1;
2814 if ((rm
& bank_mask
) == 0) {
2815 /* mixed scalar/vector */
2824 /* Load the initial operands. */
2829 /* Integer source */
2830 gen_mov_F0_vreg(0, rm
);
2835 gen_mov_F0_vreg(dp
, rd
);
2836 gen_mov_F1_vreg(dp
, rm
);
2840 /* Compare with zero */
2841 gen_mov_F0_vreg(dp
, rd
);
2852 /* Source and destination the same. */
2853 gen_mov_F0_vreg(dp
, rd
);
2859 /* VCVTB, VCVTT: only present with the halfprec extension,
2860 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2862 if (dp
|| !arm_feature(env
, ARM_FEATURE_VFP_FP16
)) {
2865 /* Otherwise fall through */
2867 /* One source operand. */
2868 gen_mov_F0_vreg(dp
, rm
);
2872 /* Two source operands. */
2873 gen_mov_F0_vreg(dp
, rn
);
2874 gen_mov_F1_vreg(dp
, rm
);
2878 /* Perform the calculation. */
2880 case 0: /* VMLA: fd + (fn * fm) */
2881 /* Note that order of inputs to the add matters for NaNs */
2883 gen_mov_F0_vreg(dp
, rd
);
2886 case 1: /* VMLS: fd + -(fn * fm) */
2889 gen_mov_F0_vreg(dp
, rd
);
2892 case 2: /* VNMLS: -fd + (fn * fm) */
2893 /* Note that it isn't valid to replace (-A + B) with (B - A)
2894 * or similar plausible looking simplifications
2895 * because this will give wrong results for NaNs.
2898 gen_mov_F0_vreg(dp
, rd
);
2902 case 3: /* VNMLA: -fd + -(fn * fm) */
2905 gen_mov_F0_vreg(dp
, rd
);
2909 case 4: /* mul: fn * fm */
2912 case 5: /* nmul: -(fn * fm) */
2916 case 6: /* add: fn + fm */
2919 case 7: /* sub: fn - fm */
2922 case 8: /* div: fn / fm */
2925 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2926 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2927 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2928 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2929 /* These are fused multiply-add, and must be done as one
2930 * floating point operation with no rounding between the
2931 * multiplication and addition steps.
2932 * NB that doing the negations here as separate steps is
2933 * correct : an input NaN should come out with its sign bit
2934 * flipped if it is a negated-input.
2936 if (!arm_feature(env
, ARM_FEATURE_VFP4
)) {
2944 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
2946 frd
= tcg_temp_new_i64();
2947 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
2950 gen_helper_vfp_negd(frd
, frd
);
2952 fpst
= get_fpstatus_ptr(0);
2953 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
2954 cpu_F1d
, frd
, fpst
);
2955 tcg_temp_free_ptr(fpst
);
2956 tcg_temp_free_i64(frd
);
2962 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
2964 frd
= tcg_temp_new_i32();
2965 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
2967 gen_helper_vfp_negs(frd
, frd
);
2969 fpst
= get_fpstatus_ptr(0);
2970 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
2971 cpu_F1s
, frd
, fpst
);
2972 tcg_temp_free_ptr(fpst
);
2973 tcg_temp_free_i32(frd
);
2976 case 14: /* fconst */
2977 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2980 n
= (insn
<< 12) & 0x80000000;
2981 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
2988 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
2995 tcg_gen_movi_i32(cpu_F0s
, n
);
2998 case 15: /* extension space */
3012 case 4: /* vcvtb.f32.f16 */
3013 tmp
= gen_vfp_mrs();
3014 tcg_gen_ext16u_i32(tmp
, tmp
);
3015 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3016 tcg_temp_free_i32(tmp
);
3018 case 5: /* vcvtt.f32.f16 */
3019 tmp
= gen_vfp_mrs();
3020 tcg_gen_shri_i32(tmp
, tmp
, 16);
3021 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3022 tcg_temp_free_i32(tmp
);
3024 case 6: /* vcvtb.f16.f32 */
3025 tmp
= tcg_temp_new_i32();
3026 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3027 gen_mov_F0_vreg(0, rd
);
3028 tmp2
= gen_vfp_mrs();
3029 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3030 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3031 tcg_temp_free_i32(tmp2
);
3034 case 7: /* vcvtt.f16.f32 */
3035 tmp
= tcg_temp_new_i32();
3036 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3037 tcg_gen_shli_i32(tmp
, tmp
, 16);
3038 gen_mov_F0_vreg(0, rd
);
3039 tmp2
= gen_vfp_mrs();
3040 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3041 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3042 tcg_temp_free_i32(tmp2
);
3054 case 11: /* cmpez */
3058 case 15: /* single<->double conversion */
3060 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3062 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3064 case 16: /* fuito */
3065 gen_vfp_uito(dp
, 0);
3067 case 17: /* fsito */
3068 gen_vfp_sito(dp
, 0);
3070 case 20: /* fshto */
3071 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3073 gen_vfp_shto(dp
, 16 - rm
, 0);
3075 case 21: /* fslto */
3076 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3078 gen_vfp_slto(dp
, 32 - rm
, 0);
3080 case 22: /* fuhto */
3081 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3083 gen_vfp_uhto(dp
, 16 - rm
, 0);
3085 case 23: /* fulto */
3086 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3088 gen_vfp_ulto(dp
, 32 - rm
, 0);
3090 case 24: /* ftoui */
3091 gen_vfp_toui(dp
, 0);
3093 case 25: /* ftouiz */
3094 gen_vfp_touiz(dp
, 0);
3096 case 26: /* ftosi */
3097 gen_vfp_tosi(dp
, 0);
3099 case 27: /* ftosiz */
3100 gen_vfp_tosiz(dp
, 0);
3102 case 28: /* ftosh */
3103 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3105 gen_vfp_tosh(dp
, 16 - rm
, 0);
3107 case 29: /* ftosl */
3108 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3110 gen_vfp_tosl(dp
, 32 - rm
, 0);
3112 case 30: /* ftouh */
3113 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3115 gen_vfp_touh(dp
, 16 - rm
, 0);
3117 case 31: /* ftoul */
3118 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3120 gen_vfp_toul(dp
, 32 - rm
, 0);
3122 default: /* undefined */
3126 default: /* undefined */
3130 /* Write back the result. */
3131 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3132 ; /* Comparison, do nothing. */
3133 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3134 /* VCVT double to int: always integer result. */
3135 gen_mov_vreg_F0(0, rd
);
3136 else if (op
== 15 && rn
== 15)
3138 gen_mov_vreg_F0(!dp
, rd
);
3140 gen_mov_vreg_F0(dp
, rd
);
3142 /* break out of the loop if we have finished */
3146 if (op
== 15 && delta_m
== 0) {
3147 /* single source one-many */
3149 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3151 gen_mov_vreg_F0(dp
, rd
);
3155 /* Setup the next operands. */
3157 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3161 /* One source operand. */
3162 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3164 gen_mov_F0_vreg(dp
, rm
);
3166 /* Two source operands. */
3167 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3169 gen_mov_F0_vreg(dp
, rn
);
3171 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3173 gen_mov_F1_vreg(dp
, rm
);
3181 if ((insn
& 0x03e00000) == 0x00400000) {
3182 /* two-register transfer */
3183 rn
= (insn
>> 16) & 0xf;
3184 rd
= (insn
>> 12) & 0xf;
3186 VFP_DREG_M(rm
, insn
);
3188 rm
= VFP_SREG_M(insn
);
3191 if (insn
& ARM_CP_RW_BIT
) {
3194 gen_mov_F0_vreg(0, rm
* 2);
3195 tmp
= gen_vfp_mrs();
3196 store_reg(s
, rd
, tmp
);
3197 gen_mov_F0_vreg(0, rm
* 2 + 1);
3198 tmp
= gen_vfp_mrs();
3199 store_reg(s
, rn
, tmp
);
3201 gen_mov_F0_vreg(0, rm
);
3202 tmp
= gen_vfp_mrs();
3203 store_reg(s
, rd
, tmp
);
3204 gen_mov_F0_vreg(0, rm
+ 1);
3205 tmp
= gen_vfp_mrs();
3206 store_reg(s
, rn
, tmp
);
3211 tmp
= load_reg(s
, rd
);
3213 gen_mov_vreg_F0(0, rm
* 2);
3214 tmp
= load_reg(s
, rn
);
3216 gen_mov_vreg_F0(0, rm
* 2 + 1);
3218 tmp
= load_reg(s
, rd
);
3220 gen_mov_vreg_F0(0, rm
);
3221 tmp
= load_reg(s
, rn
);
3223 gen_mov_vreg_F0(0, rm
+ 1);
3228 rn
= (insn
>> 16) & 0xf;
3230 VFP_DREG_D(rd
, insn
);
3232 rd
= VFP_SREG_D(insn
);
3233 if ((insn
& 0x01200000) == 0x01000000) {
3234 /* Single load/store */
3235 offset
= (insn
& 0xff) << 2;
3236 if ((insn
& (1 << 23)) == 0)
3238 if (s
->thumb
&& rn
== 15) {
3239 /* This is actually UNPREDICTABLE */
3240 addr
= tcg_temp_new_i32();
3241 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3243 addr
= load_reg(s
, rn
);
3245 tcg_gen_addi_i32(addr
, addr
, offset
);
3246 if (insn
& (1 << 20)) {
3247 gen_vfp_ld(s
, dp
, addr
);
3248 gen_mov_vreg_F0(dp
, rd
);
3250 gen_mov_F0_vreg(dp
, rd
);
3251 gen_vfp_st(s
, dp
, addr
);
3253 tcg_temp_free_i32(addr
);
3255 /* load/store multiple */
3256 int w
= insn
& (1 << 21);
3258 n
= (insn
>> 1) & 0x7f;
3262 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3263 /* P == U , W == 1 => UNDEF */
3266 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3267 /* UNPREDICTABLE cases for bad immediates: we choose to
3268 * UNDEF to avoid generating huge numbers of TCG ops
3272 if (rn
== 15 && w
) {
3273 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3277 if (s
->thumb
&& rn
== 15) {
3278 /* This is actually UNPREDICTABLE */
3279 addr
= tcg_temp_new_i32();
3280 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3282 addr
= load_reg(s
, rn
);
3284 if (insn
& (1 << 24)) /* pre-decrement */
3285 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3291 for (i
= 0; i
< n
; i
++) {
3292 if (insn
& ARM_CP_RW_BIT
) {
3294 gen_vfp_ld(s
, dp
, addr
);
3295 gen_mov_vreg_F0(dp
, rd
+ i
);
3298 gen_mov_F0_vreg(dp
, rd
+ i
);
3299 gen_vfp_st(s
, dp
, addr
);
3301 tcg_gen_addi_i32(addr
, addr
, offset
);
3305 if (insn
& (1 << 24))
3306 offset
= -offset
* n
;
3307 else if (dp
&& (insn
& 1))
3313 tcg_gen_addi_i32(addr
, addr
, offset
);
3314 store_reg(s
, rn
, addr
);
3316 tcg_temp_free_i32(addr
);
3322 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
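/* Illustrative: with x = 1 and y = 0 the operands are taken from the top
 * halfword of t0 and the (sign-extended) bottom halfword of t1 before the
 * 16x16->32 multiply, matching the SMULxy-style halfword selection.
 */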
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
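/* Illustrative (given the byte-per-field masks above): an "MSR CPSR_fc, rN"
 * sets flag bits 3 and 0, so the mask starts out as 0xff0000ff before the
 * feature and privilege filtering strips bits the core does not implement or
 * the current mode may not write.
 */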
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)
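/* Illustrative expansion: with size = 1 and u = 0, GEN_NEON_INTEGER_OP(hadd)
 * selects case 2 and emits gen_helper_neon_hadd_s16(tmp, tmp, tmp2); the
 * _ENV variant additionally passes cpu_env for helpers that need CPU state
 * (e.g. the saturating ops).
 */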
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
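/* Illustrative: for a 16-bit scalar the encoded 'reg' value packs the D
 * register in bits [2:0] and the lane in bits [4:3], so the element is
 * fetched from D(reg & 7) and then broadcast from its high or low halfword
 * according to bit 3.
 */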
3590 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
3593 if (!q
&& size
== 2) {
3596 tmp
= tcg_const_i32(rd
);
3597 tmp2
= tcg_const_i32(rm
);
3601 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
3604 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
3607 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
3615 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
3618 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
3624 tcg_temp_free_i32(tmp
);
3625 tcg_temp_free_i32(tmp2
);
3629 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
3632 if (!q
&& size
== 2) {
3635 tmp
= tcg_const_i32(rd
);
3636 tmp2
= tcg_const_i32(rm
);
3640 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
3643 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
3646 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
3654 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
3657 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
3663 tcg_temp_free_i32(tmp
);
3664 tcg_temp_free_i32(tmp2
);
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
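/* Illustrative: gen_neon_trn_u16() with t0 = 0xAAAABBBB and t1 = 0xCCCCDDDD
 * leaves t0 = 0xBBBBDDDD and t1 = 0xAAAACCCC, pairing one halfword from each
 * input in every output word, as used by the VTRN implementation.
 */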
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
3749 if (!s
->vfp_enabled
)
3751 VFP_DREG_D(rd
, insn
);
3752 rn
= (insn
>> 16) & 0xf;
3754 load
= (insn
& (1 << 21)) != 0;
3755 if ((insn
& (1 << 23)) == 0) {
3756 /* Load store all elements. */
3757 op
= (insn
>> 8) & 0xf;
3758 size
= (insn
>> 6) & 3;
3761 /* Catch UNDEF cases for bad values of align field */
3764 if (((insn
>> 5) & 1) == 1) {
3769 if (((insn
>> 4) & 3) == 3) {
3776 nregs
= neon_ls_element_type
[op
].nregs
;
3777 interleave
= neon_ls_element_type
[op
].interleave
;
3778 spacing
= neon_ls_element_type
[op
].spacing
;
3779 if (size
== 3 && (interleave
| spacing
) != 1)
3781 addr
= tcg_temp_new_i32();
3782 load_reg_var(s
, addr
, rn
);
3783 stride
= (1 << size
) * interleave
;
3784 for (reg
= 0; reg
< nregs
; reg
++) {
3785 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3786 load_reg_var(s
, addr
, rn
);
3787 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3788 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3789 load_reg_var(s
, addr
, rn
);
3790 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3794 tmp64
= gen_ld64(addr
, IS_USER(s
));
3795 neon_store_reg64(tmp64
, rd
);
3796 tcg_temp_free_i64(tmp64
);
3798 tmp64
= tcg_temp_new_i64();
3799 neon_load_reg64(tmp64
, rd
);
3800 gen_st64(tmp64
, addr
, IS_USER(s
));
3802 tcg_gen_addi_i32(addr
, addr
, stride
);
3804 for (pass
= 0; pass
< 2; pass
++) {
3807 tmp
= gen_ld32(addr
, IS_USER(s
));
3808 neon_store_reg(rd
, pass
, tmp
);
3810 tmp
= neon_load_reg(rd
, pass
);
3811 gen_st32(tmp
, addr
, IS_USER(s
));
3813 tcg_gen_addi_i32(addr
, addr
, stride
);
3814 } else if (size
== 1) {
3816 tmp
= gen_ld16u(addr
, IS_USER(s
));
3817 tcg_gen_addi_i32(addr
, addr
, stride
);
3818 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3819 tcg_gen_addi_i32(addr
, addr
, stride
);
3820 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3821 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3822 tcg_temp_free_i32(tmp2
);
3823 neon_store_reg(rd
, pass
, tmp
);
3825 tmp
= neon_load_reg(rd
, pass
);
3826 tmp2
= tcg_temp_new_i32();
3827 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3828 gen_st16(tmp
, addr
, IS_USER(s
));
3829 tcg_gen_addi_i32(addr
, addr
, stride
);
3830 gen_st16(tmp2
, addr
, IS_USER(s
));
3831 tcg_gen_addi_i32(addr
, addr
, stride
);
3833 } else /* size == 0 */ {
3836 for (n
= 0; n
< 4; n
++) {
3837 tmp
= gen_ld8u(addr
, IS_USER(s
));
3838 tcg_gen_addi_i32(addr
, addr
, stride
);
3842 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3843 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3844 tcg_temp_free_i32(tmp
);
3847 neon_store_reg(rd
, pass
, tmp2
);
3849 tmp2
= neon_load_reg(rd
, pass
);
3850 for (n
= 0; n
< 4; n
++) {
3851 tmp
= tcg_temp_new_i32();
3853 tcg_gen_mov_i32(tmp
, tmp2
);
3855 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3857 gen_st8(tmp
, addr
, IS_USER(s
));
3858 tcg_gen_addi_i32(addr
, addr
, stride
);
3860 tcg_temp_free_i32(tmp2
);
3867 tcg_temp_free_i32(addr
);
3870 size
= (insn
>> 10) & 3;
3872 /* Load single element to all lanes. */
3873 int a
= (insn
>> 4) & 1;
3877 size
= (insn
>> 6) & 3;
3878 nregs
= ((insn
>> 8) & 3) + 1;
3881 if (nregs
!= 4 || a
== 0) {
3884 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3887 if (nregs
== 1 && a
== 1 && size
== 0) {
3890 if (nregs
== 3 && a
== 1) {
3893 addr
= tcg_temp_new_i32();
3894 load_reg_var(s
, addr
, rn
);
3896 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3897 tmp
= gen_load_and_replicate(s
, addr
, size
);
3898 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3899 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3900 if (insn
& (1 << 5)) {
3901 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
3902 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
3904 tcg_temp_free_i32(tmp
);
3906 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3907 stride
= (insn
& (1 << 5)) ? 2 : 1;
3908 for (reg
= 0; reg
< nregs
; reg
++) {
3909 tmp
= gen_load_and_replicate(s
, addr
, size
);
3910 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3911 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3912 tcg_temp_free_i32(tmp
);
3913 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3917 tcg_temp_free_i32(addr
);
3918 stride
= (1 << size
) * nregs
;
3920 /* Single element. */
3921 int idx
= (insn
>> 4) & 0xf;
3922 pass
= (insn
>> 7) & 1;
3925 shift
= ((insn
>> 5) & 3) * 8;
3929 shift
= ((insn
>> 6) & 1) * 16;
3930 stride
= (insn
& (1 << 5)) ? 2 : 1;
3934 stride
= (insn
& (1 << 6)) ? 2 : 1;
3939 nregs
= ((insn
>> 8) & 3) + 1;
3940 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3943 if (((idx
& (1 << size
)) != 0) ||
3944 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
3949 if ((idx
& 1) != 0) {
3954 if (size
== 2 && (idx
& 2) != 0) {
3959 if ((size
== 2) && ((idx
& 3) == 3)) {
3966 if ((rd
+ stride
* (nregs
- 1)) > 31) {
3967 /* Attempts to write off the end of the register file
3968 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3969 * the neon_load_reg() would write off the end of the array.
3973 addr
= tcg_temp_new_i32();
3974 load_reg_var(s
, addr
, rn
);
3975 for (reg
= 0; reg
< nregs
; reg
++) {
3979 tmp
= gen_ld8u(addr
, IS_USER(s
));
3982 tmp
= gen_ld16u(addr
, IS_USER(s
));
3985 tmp
= gen_ld32(addr
, IS_USER(s
));
3987 default: /* Avoid compiler warnings. */
3991 tmp2
= neon_load_reg(rd
, pass
);
3992 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3993 tcg_temp_free_i32(tmp2
);
3995 neon_store_reg(rd
, pass
, tmp
);
3996 } else { /* Store */
3997 tmp
= neon_load_reg(rd
, pass
);
3999 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4002 gen_st8(tmp
, addr
, IS_USER(s
));
4005 gen_st16(tmp
, addr
, IS_USER(s
));
4008 gen_st32(tmp
, addr
, IS_USER(s
));
4013 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4015 tcg_temp_free_i32(addr
);
4016 stride
= nregs
* (1 << size
);
4022 base
= load_reg(s
, rn
);
4024 tcg_gen_addi_i32(base
, base
, stride
);
4027 index
= load_reg(s
, rm
);
4028 tcg_gen_add_i32(base
, base
, index
);
4029 tcg_temp_free_i32(index
);
4031 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
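/* Illustrative: with c = 0xff00ff00, t = 0x11223344 and f = 0xaabbccdd the
 * result is 0x11bb33dd: each bit comes from t where the corresponding bit of
 * c is set and from f where it is clear.
 */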
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
4084 static inline void gen_neon_shift_narrow(int size
, TCGv var
, TCGv shift
,
4090 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4091 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4096 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4097 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4104 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
4105 case 2: gen_helper_neon_shl_u32(var
, var
, shift
); break;
4110 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4111 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
4118 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv src
, int size
, int u
)
4122 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
4123 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
4124 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
4129 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
4130 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
4131 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
4135 tcg_temp_free_i32(src
);
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
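/* Illustrative: (size << 1) | u selects the widening multiply variant, so a
 * signed 16x16->32 multiply (size = 1, u = 0) dispatches to
 * gen_helper_neon_mull_s16, while only the 32-bit element cases go through
 * the generic gen_muls/gen_mulu 64-bit paths.
 */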
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
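/* Illustrative use of the table above: VSHL has 0xf, so any size 0..3 passes
 * the (neon_3r_sizes[op] & (1 << size)) == 0 UNDEF check, whereas VHADD
 * (0x7) rejects the 64-bit element encoding size == 3.
 */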
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
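/* Illustrative: NEON_2RM_VCVT_FS only has bit 2 set, so the float<->int
 * conversions are accepted solely with size == 2 (32-bit elements); any other
 * size, and any unallocated op value with an all-zero entry, UNDEFs.
 */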
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int op, q, u, size, pass;
    int rd, rn, rm;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
4450 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4453 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4459 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4462 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4468 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4470 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4475 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4478 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4484 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4486 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4489 case NEON_3R_VQRSHL
:
4491 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4494 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4498 case NEON_3R_VADD_VSUB
:
4500 tcg_gen_sub_i64(CPU_V001
);
4502 tcg_gen_add_i64(CPU_V001
);
4508 neon_store_reg64(cpu_V0
, rd
+ pass
);
4517 case NEON_3R_VQRSHL
:
4520 /* Shift instruction operands are reversed. */
4535 case NEON_3R_FLOAT_ARITH
:
4536 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4538 case NEON_3R_FLOAT_MINMAX
:
4539 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4541 case NEON_3R_FLOAT_CMP
:
4543 /* no encoding for U=0 C=1x */
4547 case NEON_3R_FLOAT_ACMP
:
4552 case NEON_3R_VRECPS_VRSQRTS
:
4558 if (u
&& (size
!= 0)) {
4559 /* UNDEF on invalid size for polynomial subcase */
4564 if (!arm_feature(env
, ARM_FEATURE_VFP4
) || u
) {
4572 if (pairwise
&& q
) {
4573 /* All the pairwise insns UNDEF if Q is set */
4577 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4582 tmp
= neon_load_reg(rn
, 0);
4583 tmp2
= neon_load_reg(rn
, 1);
4585 tmp
= neon_load_reg(rm
, 0);
4586 tmp2
= neon_load_reg(rm
, 1);
4590 tmp
= neon_load_reg(rn
, pass
);
4591 tmp2
= neon_load_reg(rm
, pass
);
4595 GEN_NEON_INTEGER_OP(hadd
);
4598 GEN_NEON_INTEGER_OP_ENV(qadd
);
4600 case NEON_3R_VRHADD
:
4601 GEN_NEON_INTEGER_OP(rhadd
);
4603 case NEON_3R_LOGIC
: /* Logic ops. */
4604 switch ((u
<< 2) | size
) {
4606 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4609 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4612 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4615 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4618 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4621 tmp3
= neon_load_reg(rd
, pass
);
4622 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4623 tcg_temp_free_i32(tmp3
);
4626 tmp3
= neon_load_reg(rd
, pass
);
4627 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4628 tcg_temp_free_i32(tmp3
);
4631 tmp3
= neon_load_reg(rd
, pass
);
4632 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4633 tcg_temp_free_i32(tmp3
);
4638 GEN_NEON_INTEGER_OP(hsub
);
4641 GEN_NEON_INTEGER_OP_ENV(qsub
);
4644 GEN_NEON_INTEGER_OP(cgt
);
4647 GEN_NEON_INTEGER_OP(cge
);
4650 GEN_NEON_INTEGER_OP(shl
);
4653 GEN_NEON_INTEGER_OP_ENV(qshl
);
4656 GEN_NEON_INTEGER_OP(rshl
);
4658 case NEON_3R_VQRSHL
:
4659 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4662 GEN_NEON_INTEGER_OP(max
);
4665 GEN_NEON_INTEGER_OP(min
);
4668 GEN_NEON_INTEGER_OP(abd
);
4671 GEN_NEON_INTEGER_OP(abd
);
4672 tcg_temp_free_i32(tmp2
);
4673 tmp2
= neon_load_reg(rd
, pass
);
4674 gen_neon_add(size
, tmp
, tmp2
);
4676 case NEON_3R_VADD_VSUB
:
4677 if (!u
) { /* VADD */
4678 gen_neon_add(size
, tmp
, tmp2
);
4681 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4682 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4683 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4688 case NEON_3R_VTST_VCEQ
:
4689 if (!u
) { /* VTST */
4691 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4692 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4693 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4698 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4699 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4700 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4705 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4707 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4708 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4709 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4712 tcg_temp_free_i32(tmp2
);
4713 tmp2
= neon_load_reg(rd
, pass
);
4715 gen_neon_rsb(size
, tmp
, tmp2
);
4717 gen_neon_add(size
, tmp
, tmp2
);
4721 if (u
) { /* polynomial */
4722 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4723 } else { /* Integer */
4725 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4726 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4727 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4733 GEN_NEON_INTEGER_OP(pmax
);
4736 GEN_NEON_INTEGER_OP(pmin
);
4738 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4739 if (!u
) { /* VQDMULH */
4742 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4745 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4749 } else { /* VQRDMULH */
4752 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4755 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4763 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4764 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4765 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4769 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4771 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4772 switch ((u
<< 2) | size
) {
4775 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4778 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
4781 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
4786 tcg_temp_free_ptr(fpstatus
);
4789 case NEON_3R_FLOAT_MULTIPLY
:
4791 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4792 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
4794 tcg_temp_free_i32(tmp2
);
4795 tmp2
= neon_load_reg(rd
, pass
);
4797 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4799 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
4802 tcg_temp_free_ptr(fpstatus
);
4805 case NEON_3R_FLOAT_CMP
:
4807 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4809 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
4812 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4814 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4817 tcg_temp_free_ptr(fpstatus
);
4820 case NEON_3R_FLOAT_ACMP
:
4822 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4824 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4826 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4828 tcg_temp_free_ptr(fpstatus
);
4831 case NEON_3R_FLOAT_MINMAX
:
4833 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4835 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
, fpstatus
);
4837 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
, fpstatus
);
4839 tcg_temp_free_ptr(fpstatus
);
4842 case NEON_3R_VRECPS_VRSQRTS
:
4844 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4846 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4850 /* VFMA, VFMS: fused multiply-add */
4851 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4852 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
4855 gen_helper_vfp_negs(tmp
, tmp
);
4857 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
4858 tcg_temp_free_i32(tmp3
);
4859 tcg_temp_free_ptr(fpstatus
);
4865 tcg_temp_free_i32(tmp2
);
4867 /* Save the result. For elementwise operations we can put it
4868 straight into the destination register. For pairwise operations
4869 we have to be careful to avoid clobbering the source operands. */
4870 if (pairwise
&& rd
== rm
) {
4871 neon_store_scratch(pass
, tmp
);
4873 neon_store_reg(rd
, pass
, tmp
);
4877 if (pairwise
&& rd
== rm
) {
4878 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4879 tmp
= neon_load_scratch(pass
);
4880 neon_store_reg(rd
, pass
, tmp
);
4883 /* End of 3 register same size operations. */
4884 } else if (insn
& (1 << 4)) {
4885 if ((insn
& 0x00380080) != 0) {
4886 /* Two registers and shift. */
4887 op
= (insn
>> 8) & 0xf;
4888 if (insn
& (1 << 7)) {
4896 while ((insn
& (1 << (size
+ 19))) == 0)
4899 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4900 /* To avoid excessive dumplication of ops we implement shift
4901 by immediate using the variable shift operations. */
4903 /* Shift by immediate:
4904 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4905 if (q
&& ((rd
| rm
) & 1)) {
4908 if (!u
&& (op
== 4 || op
== 6)) {
4911 /* Right shifts are encoded as N - shift, where N is the
4912 element size in bits. */
4914 shift
= shift
- (1 << (size
+ 3));
4922 imm
= (uint8_t) shift
;
4927 imm
= (uint16_t) shift
;
4938 for (pass
= 0; pass
< count
; pass
++) {
4940 neon_load_reg64(cpu_V0
, rm
+ pass
);
4941 tcg_gen_movi_i64(cpu_V1
, imm
);
4946 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4948 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4953 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4955 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4958 case 5: /* VSHL, VSLI */
4959 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4961 case 6: /* VQSHLU */
4962 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
4967 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4970 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4975 if (op
== 1 || op
== 3) {
4977 neon_load_reg64(cpu_V1
, rd
+ pass
);
4978 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4979 } else if (op
== 4 || (op
== 5 && u
)) {
4981 neon_load_reg64(cpu_V1
, rd
+ pass
);
4983 if (shift
< -63 || shift
> 63) {
4987 mask
= 0xffffffffffffffffull
>> -shift
;
4989 mask
= 0xffffffffffffffffull
<< shift
;
4992 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
4993 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4995 neon_store_reg64(cpu_V0
, rd
+ pass
);
4996 } else { /* size < 3 */
4997 /* Operands in T0 and T1. */
4998 tmp
= neon_load_reg(rm
, pass
);
4999 tmp2
= tcg_temp_new_i32();
5000 tcg_gen_movi_i32(tmp2
, imm
);
5004 GEN_NEON_INTEGER_OP(shl
);
5008 GEN_NEON_INTEGER_OP(rshl
);
5011 case 5: /* VSHL, VSLI */
5013 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5014 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5015 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5019 case 6: /* VQSHLU */
5022 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5026 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5030 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5038 GEN_NEON_INTEGER_OP_ENV(qshl
);
5041 tcg_temp_free_i32(tmp2
);
5043 if (op
== 1 || op
== 3) {
5045 tmp2
= neon_load_reg(rd
, pass
);
5046 gen_neon_add(size
, tmp
, tmp2
);
5047 tcg_temp_free_i32(tmp2
);
5048 } else if (op
== 4 || (op
== 5 && u
)) {
5053 mask
= 0xff >> -shift
;
5055 mask
= (uint8_t)(0xff << shift
);
5061 mask
= 0xffff >> -shift
;
5063 mask
= (uint16_t)(0xffff << shift
);
5067 if (shift
< -31 || shift
> 31) {
5071 mask
= 0xffffffffu
>> -shift
;
5073 mask
= 0xffffffffu
<< shift
;
5079 tmp2
= neon_load_reg(rd
, pass
);
5080 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5081 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5082 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5083 tcg_temp_free_i32(tmp2
);
5085 neon_store_reg(rd
, pass
, tmp
);
5088 } else if (op
< 10) {
5089 /* Shift by immediate and narrow:
5090 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5091 int input_unsigned
= (op
== 8) ? !u
: u
;
5095 shift
= shift
- (1 << (size
+ 3));
5098 tmp64
= tcg_const_i64(shift
);
5099 neon_load_reg64(cpu_V0
, rm
);
5100 neon_load_reg64(cpu_V1
, rm
+ 1);
5101 for (pass
= 0; pass
< 2; pass
++) {
5109 if (input_unsigned
) {
5110 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5112 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5115 if (input_unsigned
) {
5116 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5118 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5121 tmp
= tcg_temp_new_i32();
5122 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5123 neon_store_reg(rd
, pass
, tmp
);
5125 tcg_temp_free_i64(tmp64
);
5128 imm
= (uint16_t)shift
;
5132 imm
= (uint32_t)shift
;
5134 tmp2
= tcg_const_i32(imm
);
5135 tmp4
= neon_load_reg(rm
+ 1, 0);
5136 tmp5
= neon_load_reg(rm
+ 1, 1);
5137 for (pass
= 0; pass
< 2; pass
++) {
5139 tmp
= neon_load_reg(rm
, 0);
5143 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5146 tmp3
= neon_load_reg(rm
, 1);
5150 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5152 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5153 tcg_temp_free_i32(tmp
);
5154 tcg_temp_free_i32(tmp3
);
5155 tmp
= tcg_temp_new_i32();
5156 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5157 neon_store_reg(rd
, pass
, tmp
);
5159 tcg_temp_free_i32(tmp2
);
5161 } else if (op
== 10) {
5163 if (q
|| (rd
& 1)) {
5166 tmp
= neon_load_reg(rm
, 0);
5167 tmp2
= neon_load_reg(rm
, 1);
5168 for (pass
= 0; pass
< 2; pass
++) {
5172 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5175 /* The shift is less than the width of the source
5176 type, so we can just shift the whole register. */
5177 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5178 /* Widen the result of shift: we need to clear
5179 * the potential overflow bits resulting from
5180 * left bits of the narrow input appearing as
5181 * right bits of left the neighbour narrow
5183 if (size
< 2 || !u
) {
5186 imm
= (0xffu
>> (8 - shift
));
5188 } else if (size
== 1) {
5189 imm
= 0xffff >> (16 - shift
);
5192 imm
= 0xffffffff >> (32 - shift
);
5195 imm64
= imm
| (((uint64_t)imm
) << 32);
5199 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5202 neon_store_reg64(cpu_V0
, rd
+ pass
);
5204 } else if (op
>= 14) {
5205 /* VCVT fixed-point. */
5206 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5209 /* We have already masked out the must-be-1 top bit of imm6,
5210 * hence this 32-shift where the ARM ARM has 64-imm6.
5213 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5214 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5217 gen_vfp_ulto(0, shift
, 1);
5219 gen_vfp_slto(0, shift
, 1);
5222 gen_vfp_toul(0, shift
, 1);
5224 gen_vfp_tosl(0, shift
, 1);
5226 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
5231 } else { /* (insn & 0x00380080) == 0 */
5233 if (q
&& (rd
& 1)) {
5237 op
= (insn
>> 8) & 0xf;
5238 /* One register and immediate. */
5239 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5240 invert
= (insn
& (1 << 5)) != 0;
5241 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5242 * We choose to not special-case this and will behave as if a
5243 * valid constant encoding of 0 had been given.
5262 imm
= (imm
<< 8) | (imm
<< 24);
5265 imm
= (imm
<< 8) | 0xff;
5268 imm
= (imm
<< 16) | 0xffff;
5271 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5279 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5280 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5286 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5287 if (op
& 1 && op
< 12) {
5288 tmp
= neon_load_reg(rd
, pass
);
5290 /* The immediate value has already been inverted, so
5292 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5294 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5298 tmp
= tcg_temp_new_i32();
5299 if (op
== 14 && invert
) {
5303 for (n
= 0; n
< 4; n
++) {
5304 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5305 val
|= 0xff << (n
* 8);
5307 tcg_gen_movi_i32(tmp
, val
);
5309 tcg_gen_movi_i32(tmp
, imm
);
5312 neon_store_reg(rd
, pass
, tmp
);
5315 } else { /* (insn & 0x00800010 == 0x00800000) */
5317 op
= (insn
>> 8) & 0xf;
5318 if ((insn
& (1 << 6)) == 0) {
5319 /* Three registers of different lengths. */
5323 /* undefreq: bit 0 : UNDEF if size != 0
5324 * bit 1 : UNDEF if size == 0
5325 * bit 2 : UNDEF if U == 1
5326 * Note that [1:0] set implies 'always UNDEF'
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 6}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 6}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 2}, /* VQDMULL */
                {0, 0, 0, 5}, /* Polynomial VMULL */
                {0, 0, 0, 3}, /* Reserved: always UNDEF */
            };
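            /* Note (added for readability): each row above is indexed by the op field.
             * prewiden means the narrow source elements are widened before the operation,
             * src1_wide/src2_wide mean the corresponding source register is already a
             * double-width operand, and undefreq encodes the UNDEF conditions (per the
             * bit legend above) that are tested just below. */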
            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];

            if (((undefreq & 1) && (size != 0)) ||
                ((undefreq & 2) && (size == 0)) ||
                ((undefreq & 4) && u)) {
            if ((src1_wide && (rn & 1)) ||
                (src2_wide && (rm & 1)) ||
                (!src2_wide && (rd & 1))) {
            /* Avoid overlapping operands.  Wide source operands are
               always aligned so will never overlap with wide
               destinations in problematic ways.  */
            if (rd == rm && !src2_wide) {
                tmp = neon_load_reg(rm, 1);
                neon_store_scratch(2, tmp);
            } else if (rd == rn && !src1_wide) {
                tmp = neon_load_reg(rn, 1);
                neon_store_scratch(2, tmp);
            }
5376 for (pass
= 0; pass
< 2; pass
++) {
5378 neon_load_reg64(cpu_V0
, rn
+ pass
);
5381 if (pass
== 1 && rd
== rn
) {
5382 tmp
= neon_load_scratch(2);
5384 tmp
= neon_load_reg(rn
, pass
);
5387 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5391 neon_load_reg64(cpu_V1
, rm
+ pass
);
5394 if (pass
== 1 && rd
== rm
) {
5395 tmp2
= neon_load_scratch(2);
5397 tmp2
= neon_load_reg(rm
, pass
);
5400 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5404 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5405 gen_neon_addl(size
);
5407 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5408 gen_neon_subl(size
);
5410 case 5: case 7: /* VABAL, VABDL */
5411 switch ((size
<< 1) | u
) {
5413 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5416 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5419 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5422 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5425 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5428 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5432 tcg_temp_free_i32(tmp2
);
5433 tcg_temp_free_i32(tmp
);
5435 case 8: case 9: case 10: case 11: case 12: case 13:
5436 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5437 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5439 case 14: /* Polynomial VMULL */
5440 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5441 tcg_temp_free_i32(tmp2
);
5442 tcg_temp_free_i32(tmp
);
5444 default: /* 15 is RESERVED: caught earlier */
5449 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5450 neon_store_reg64(cpu_V0
, rd
+ pass
);
5451 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5453 neon_load_reg64(cpu_V1
, rd
+ pass
);
5455 case 10: /* VMLSL */
5456 gen_neon_negl(cpu_V0
, size
);
5458 case 5: case 8: /* VABAL, VMLAL */
5459 gen_neon_addl(size
);
5461 case 9: case 11: /* VQDMLAL, VQDMLSL */
5462 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5464 gen_neon_negl(cpu_V0
, size
);
5466 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5471 neon_store_reg64(cpu_V0
, rd
+ pass
);
5472 } else if (op
== 4 || op
== 6) {
5473 /* Narrowing operation. */
5474 tmp
= tcg_temp_new_i32();
5478 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5481 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5484 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5485 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5492 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5495 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5498 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5499 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5500 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5508 neon_store_reg(rd
, 0, tmp3
);
5509 neon_store_reg(rd
, 1, tmp
);
5512 /* Write back the result. */
5513 neon_store_reg64(cpu_V0
, rd
+ pass
);
5517 /* Two registers and a scalar. NB that for ops of this form
5518 * the ARM ARM labels bit 24 as Q, but it is in our variable
5525 case 1: /* Float VMLA scalar */
5526 case 5: /* Floating point VMLS scalar */
5527 case 9: /* Floating point VMUL scalar */
5532 case 0: /* Integer VMLA scalar */
5533 case 4: /* Integer VMLS scalar */
5534 case 8: /* Integer VMUL scalar */
5535 case 12: /* VQDMULH scalar */
5536 case 13: /* VQRDMULH scalar */
5537 if (u
&& ((rd
| rn
) & 1)) {
5540 tmp
= neon_get_scalar(size
, rm
);
5541 neon_store_scratch(0, tmp
);
5542 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5543 tmp
= neon_load_scratch(0);
5544 tmp2
= neon_load_reg(rn
, pass
);
5547 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5549 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5551 } else if (op
== 13) {
5553 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5555 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5557 } else if (op
& 1) {
5558 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5559 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5560 tcg_temp_free_ptr(fpstatus
);
5563 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5564 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5565 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5569 tcg_temp_free_i32(tmp2
);
5572 tmp2
= neon_load_reg(rd
, pass
);
5575 gen_neon_add(size
, tmp
, tmp2
);
5579 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5580 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5581 tcg_temp_free_ptr(fpstatus
);
5585 gen_neon_rsb(size
, tmp
, tmp2
);
5589 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5590 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5591 tcg_temp_free_ptr(fpstatus
);
5597 tcg_temp_free_i32(tmp2
);
5599 neon_store_reg(rd
, pass
, tmp
);
            case 3: /* VQDMLAL scalar */
            case 7: /* VQDMLSL scalar */
            case 11: /* VQDMULL scalar */
            case 2: /* VMLAL scalar */
            case 6: /* VMLSL scalar */
            case 10: /* VMULL scalar */
5615 tmp2
= neon_get_scalar(size
, rm
);
5616 /* We need a copy of tmp2 because gen_neon_mull
5617 * deletes it during pass 0. */
5618 tmp4
= tcg_temp_new_i32();
5619 tcg_gen_mov_i32(tmp4
, tmp2
);
5620 tmp3
= neon_load_reg(rn
, 1);
5622 for (pass
= 0; pass
< 2; pass
++) {
5624 tmp
= neon_load_reg(rn
, 0);
5629 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5631 neon_load_reg64(cpu_V1
, rd
+ pass
);
5635 gen_neon_negl(cpu_V0
, size
);
5638 gen_neon_addl(size
);
5641 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5643 gen_neon_negl(cpu_V0
, size
);
5645 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5651 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5656 neon_store_reg64(cpu_V0
, rd
+ pass
);
5661 default: /* 14 and 15 are RESERVED */
5665 } else { /* size == 3 */
5668 imm
= (insn
>> 8) & 0xf;
5673 if (q
&& ((rd
| rn
| rm
) & 1)) {
5678 neon_load_reg64(cpu_V0
, rn
);
5680 neon_load_reg64(cpu_V1
, rn
+ 1);
5682 } else if (imm
== 8) {
5683 neon_load_reg64(cpu_V0
, rn
+ 1);
5685 neon_load_reg64(cpu_V1
, rm
);
5688 tmp64
= tcg_temp_new_i64();
5690 neon_load_reg64(cpu_V0
, rn
);
5691 neon_load_reg64(tmp64
, rn
+ 1);
5693 neon_load_reg64(cpu_V0
, rn
+ 1);
5694 neon_load_reg64(tmp64
, rm
);
5696 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5697 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5698 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5700 neon_load_reg64(cpu_V1
, rm
);
5702 neon_load_reg64(cpu_V1
, rm
+ 1);
5705 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5706 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5707 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5708 tcg_temp_free_i64(tmp64
);
5711 neon_load_reg64(cpu_V0
, rn
);
5712 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5713 neon_load_reg64(cpu_V1
, rm
);
5714 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5715 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5717 neon_store_reg64(cpu_V0
, rd
);
5719 neon_store_reg64(cpu_V1
, rd
+ 1);
5721 } else if ((insn
& (1 << 11)) == 0) {
5722 /* Two register misc. */
5723 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5724 size
= (insn
>> 18) & 3;
5725 /* UNDEF for unknown op values and bad op-size combinations */
5726 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
5729 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
5730 q
&& ((rm
| rd
) & 1)) {
5734 case NEON_2RM_VREV64
:
5735 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5736 tmp
= neon_load_reg(rm
, pass
* 2);
5737 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5739 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5740 case 1: gen_swap_half(tmp
); break;
5741 case 2: /* no-op */ break;
5744 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5746 neon_store_reg(rd
, pass
* 2, tmp2
);
5749 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5750 case 1: gen_swap_half(tmp2
); break;
5753 neon_store_reg(rd
, pass
* 2, tmp2
);
5757 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
5758 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
5759 for (pass
= 0; pass
< q
+ 1; pass
++) {
5760 tmp
= neon_load_reg(rm
, pass
* 2);
5761 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5762 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5763 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5765 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5766 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5767 case 2: tcg_gen_add_i64(CPU_V001
); break;
5770 if (op
>= NEON_2RM_VPADAL
) {
5772 neon_load_reg64(cpu_V1
, rd
+ pass
);
5773 gen_neon_addl(size
);
5775 neon_store_reg64(cpu_V0
, rd
+ pass
);
5781 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5782 tmp
= neon_load_reg(rm
, n
);
5783 tmp2
= neon_load_reg(rd
, n
+ 1);
5784 neon_store_reg(rm
, n
, tmp2
);
5785 neon_store_reg(rd
, n
+ 1, tmp
);
5792 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5797 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5801 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
5802 /* also VQMOVUN; op field and mnemonics don't line up */
5807 for (pass
= 0; pass
< 2; pass
++) {
5808 neon_load_reg64(cpu_V0
, rm
+ pass
);
5809 tmp
= tcg_temp_new_i32();
5810 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
5815 neon_store_reg(rd
, 0, tmp2
);
5816 neon_store_reg(rd
, 1, tmp
);
5820 case NEON_2RM_VSHLL
:
5821 if (q
|| (rd
& 1)) {
5824 tmp
= neon_load_reg(rm
, 0);
5825 tmp2
= neon_load_reg(rm
, 1);
5826 for (pass
= 0; pass
< 2; pass
++) {
5829 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5830 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5831 neon_store_reg64(cpu_V0
, rd
+ pass
);
5834 case NEON_2RM_VCVT_F16_F32
:
5835 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5839 tmp
= tcg_temp_new_i32();
5840 tmp2
= tcg_temp_new_i32();
5841 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5842 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5843 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5844 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5845 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5846 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5847 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5848 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5849 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5850 neon_store_reg(rd
, 0, tmp2
);
5851 tmp2
= tcg_temp_new_i32();
5852 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5853 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5854 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5855 neon_store_reg(rd
, 1, tmp2
);
5856 tcg_temp_free_i32(tmp
);
5858 case NEON_2RM_VCVT_F32_F16
:
5859 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5863 tmp3
= tcg_temp_new_i32();
5864 tmp
= neon_load_reg(rm
, 0);
5865 tmp2
= neon_load_reg(rm
, 1);
5866 tcg_gen_ext16u_i32(tmp3
, tmp
);
5867 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5868 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5869 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5870 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5871 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5872 tcg_temp_free_i32(tmp
);
5873 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5874 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5875 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5876 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5877 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5878 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5879 tcg_temp_free_i32(tmp2
);
5880 tcg_temp_free_i32(tmp3
);
5884 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5885 if (neon_2rm_is_float_op(op
)) {
5886 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5887 neon_reg_offset(rm
, pass
));
5890 tmp
= neon_load_reg(rm
, pass
);
5893 case NEON_2RM_VREV32
:
5895 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5896 case 1: gen_swap_half(tmp
); break;
5900 case NEON_2RM_VREV16
:
5905 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5906 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5907 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5913 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5914 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5915 case 2: gen_helper_clz(tmp
, tmp
); break;
5920 gen_helper_neon_cnt_u8(tmp
, tmp
);
5923 tcg_gen_not_i32(tmp
, tmp
);
5925 case NEON_2RM_VQABS
:
5928 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
5931 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
5934 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
5939 case NEON_2RM_VQNEG
:
5942 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
5945 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
5948 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
5953 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
5954 tmp2
= tcg_const_i32(0);
5956 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5957 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5958 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5961 tcg_temp_free(tmp2
);
5962 if (op
== NEON_2RM_VCLE0
) {
5963 tcg_gen_not_i32(tmp
, tmp
);
5966 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
5967 tmp2
= tcg_const_i32(0);
5969 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5970 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5971 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5974 tcg_temp_free(tmp2
);
5975 if (op
== NEON_2RM_VCLT0
) {
5976 tcg_gen_not_i32(tmp
, tmp
);
5979 case NEON_2RM_VCEQ0
:
5980 tmp2
= tcg_const_i32(0);
5982 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5983 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5984 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5987 tcg_temp_free(tmp2
);
5991 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5992 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5993 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5998 tmp2
= tcg_const_i32(0);
5999 gen_neon_rsb(size
, tmp
, tmp2
);
6000 tcg_temp_free(tmp2
);
6002 case NEON_2RM_VCGT0_F
:
6004 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6005 tmp2
= tcg_const_i32(0);
6006 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6007 tcg_temp_free(tmp2
);
6008 tcg_temp_free_ptr(fpstatus
);
6011 case NEON_2RM_VCGE0_F
:
6013 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6014 tmp2
= tcg_const_i32(0);
6015 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6016 tcg_temp_free(tmp2
);
6017 tcg_temp_free_ptr(fpstatus
);
6020 case NEON_2RM_VCEQ0_F
:
6022 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6023 tmp2
= tcg_const_i32(0);
6024 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6025 tcg_temp_free(tmp2
);
6026 tcg_temp_free_ptr(fpstatus
);
6029 case NEON_2RM_VCLE0_F
:
6031 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6032 tmp2
= tcg_const_i32(0);
6033 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6034 tcg_temp_free(tmp2
);
6035 tcg_temp_free_ptr(fpstatus
);
6038 case NEON_2RM_VCLT0_F
:
6040 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6041 tmp2
= tcg_const_i32(0);
6042 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6043 tcg_temp_free(tmp2
);
6044 tcg_temp_free_ptr(fpstatus
);
6047 case NEON_2RM_VABS_F
:
6050 case NEON_2RM_VNEG_F
:
6054 tmp2
= neon_load_reg(rd
, pass
);
6055 neon_store_reg(rm
, pass
, tmp2
);
6058 tmp2
= neon_load_reg(rd
, pass
);
6060 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6061 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6064 neon_store_reg(rm
, pass
, tmp2
);
6066 case NEON_2RM_VRECPE
:
6067 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
6069 case NEON_2RM_VRSQRTE
:
6070 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
6072 case NEON_2RM_VRECPE_F
:
6073 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6075 case NEON_2RM_VRSQRTE_F
:
6076 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6078 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6081 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6084 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6085 gen_vfp_tosiz(0, 1);
6087 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6088 gen_vfp_touiz(0, 1);
6091 /* Reserved op values were caught by the
6092 * neon_2rm_sizes[] check earlier.
6096 if (neon_2rm_is_float_op(op
)) {
6097 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6098 neon_reg_offset(rd
, pass
));
6100 neon_store_reg(rd
, pass
, tmp
);
6105 } else if ((insn
& (1 << 10)) == 0) {
6107 int n
= ((insn
>> 8) & 3) + 1;
6108 if ((rn
+ n
) > 32) {
6109 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6110 * helper function running off the end of the register file.
6115 if (insn
& (1 << 6)) {
6116 tmp
= neon_load_reg(rd
, 0);
6118 tmp
= tcg_temp_new_i32();
6119 tcg_gen_movi_i32(tmp
, 0);
6121 tmp2
= neon_load_reg(rm
, 0);
6122 tmp4
= tcg_const_i32(rn
);
6123 tmp5
= tcg_const_i32(n
);
6124 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
6125 tcg_temp_free_i32(tmp
);
6126 if (insn
& (1 << 6)) {
6127 tmp
= neon_load_reg(rd
, 1);
6129 tmp
= tcg_temp_new_i32();
6130 tcg_gen_movi_i32(tmp
, 0);
6132 tmp3
= neon_load_reg(rm
, 1);
6133 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
6134 tcg_temp_free_i32(tmp5
);
6135 tcg_temp_free_i32(tmp4
);
6136 neon_store_reg(rd
, 0, tmp2
);
6137 neon_store_reg(rd
, 1, tmp3
);
6138 tcg_temp_free_i32(tmp
);
6139 } else if ((insn
& 0x380) == 0) {
6141 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6144 if (insn
& (1 << 19)) {
6145 tmp
= neon_load_reg(rm
, 1);
6147 tmp
= neon_load_reg(rm
, 0);
6149 if (insn
& (1 << 16)) {
6150 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6151 } else if (insn
& (1 << 17)) {
6152 if ((insn
>> 18) & 1)
6153 gen_neon_dup_high16(tmp
);
6155 gen_neon_dup_low16(tmp
);
6157 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6158 tmp2
= tcg_temp_new_i32();
6159 tcg_gen_mov_i32(tmp2
, tmp
);
6160 neon_store_reg(rd
, pass
, tmp2
);
6162 tcg_temp_free_i32(tmp
);
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    }
6199 /* Otherwise treat as a generic register access */
6200 is64
= (insn
& (1 << 25)) == 0;
6201 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
6209 opc1
= (insn
>> 4) & 0xf;
6211 rt2
= (insn
>> 16) & 0xf;
6213 crn
= (insn
>> 16) & 0xf;
6214 opc1
= (insn
>> 21) & 7;
6215 opc2
= (insn
>> 5) & 7;
6218 isread
= (insn
>> 20) & 1;
6219 rt
= (insn
>> 12) & 0xf;
6221 ri
= get_arm_cp_reginfo(cpu
,
6222 ENCODE_CP_REG(cpnum
, is64
, crn
, crm
, opc1
, opc2
));
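    /* Illustrative note (added): a system register access such as
     *   mrc p15, 0, <Rt>, c1, c0, 0      (SCTLR read)
     * reaches this point with cpnum = 15, opc1 = 0, crn = 1, crm = 0,
     * opc2 = 0 and isread = 1, and the lookup above returns the
     * ARMCPRegInfo registered for that encoding. */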
6224 /* Check access permissions */
6225 if (!cp_access_ok(env
, ri
, isread
)) {
6229 /* Handle special cases first */
6230 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
6237 gen_set_pc_im(s
->pc
);
6238 s
->is_jmp
= DISAS_WFI
;
6249 if (ri
->type
& ARM_CP_CONST
) {
6250 tmp64
= tcg_const_i64(ri
->resetvalue
);
6251 } else if (ri
->readfn
) {
6253 gen_set_pc_im(s
->pc
);
6254 tmp64
= tcg_temp_new_i64();
6255 tmpptr
= tcg_const_ptr(ri
);
6256 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
6257 tcg_temp_free_ptr(tmpptr
);
6259 tmp64
= tcg_temp_new_i64();
6260 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6262 tmp
= tcg_temp_new_i32();
6263 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6264 store_reg(s
, rt
, tmp
);
6265 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6266 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6267 store_reg(s
, rt2
, tmp
);
6270 if (ri
->type
& ARM_CP_CONST
) {
6271 tmp
= tcg_const_i32(ri
->resetvalue
);
6272 } else if (ri
->readfn
) {
6274 gen_set_pc_im(s
->pc
);
6275 tmp
= tcg_temp_new_i32();
6276 tmpptr
= tcg_const_ptr(ri
);
6277 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
6278 tcg_temp_free_ptr(tmpptr
);
6280 tmp
= load_cpu_offset(ri
->fieldoffset
);
6283 /* Destination register of r15 for 32 bit loads sets
6284 * the condition codes from the high 4 bits of the value
6287 tcg_temp_free_i32(tmp
);
6289 store_reg(s
, rt
, tmp
);
6294 if (ri
->type
& ARM_CP_CONST
) {
6295 /* If not forbidden by access permissions, treat as WI */
6301 TCGv_i64 tmp64
= tcg_temp_new_i64();
6302 tmplo
= load_reg(s
, rt
);
6303 tmphi
= load_reg(s
, rt2
);
6304 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
6305 tcg_temp_free_i32(tmplo
);
6306 tcg_temp_free_i32(tmphi
);
6308 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
6309 gen_set_pc_im(s
->pc
);
6310 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
6311 tcg_temp_free_ptr(tmpptr
);
6313 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6315 tcg_temp_free_i64(tmp64
);
6320 gen_set_pc_im(s
->pc
);
6321 tmp
= load_reg(s
, rt
);
6322 tmpptr
= tcg_const_ptr(ri
);
6323 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
6324 tcg_temp_free_ptr(tmpptr
);
6325 tcg_temp_free_i32(tmp
);
6327 TCGv tmp
= load_reg(s
, rt
);
6328 store_cpu_offset(tmp
, ri
->fieldoffset
);
6331 /* We default to ending the TB on a coprocessor register write,
6332 * but allow this to be suppressed by the register definition
6333 * (usually only necessary to work around guest bugs).
6335 if (!(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
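/* Added sketch of the generated behaviour for a word-sized LDREX/STREX pair
 * on [Rn] (the TCG emitted by the functions below is the authoritative
 * version):
 *   LDREX:  exclusive_addr = Rn; exclusive_val = [Rn]; Rt = [Rn];
 *   STREX:  if (Rn == exclusive_addr && [Rn] == exclusive_val) {
 *               [Rn] = Rt; Rd = 0;
 *           } else {
 *               Rd = 1;
 *           }
 *           exclusive_addr = -1;
 */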
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6526 static void disas_arm_insn(CPUARMState
* env
, DisasContext
*s
)
6528 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6535 insn
= arm_ldl_code(s
->pc
, s
->bswap_code
);
6538 /* M variants do not implement ARM mode. */
6543 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6544 * choose to UNDEF. In ARMv5 and above the space is used
6545 * for miscellaneous unconditional instructions.
6549 /* Unconditional instructions. */
6550 if (((insn
>> 25) & 7) == 1) {
6551 /* NEON Data processing. */
6552 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6555 if (disas_neon_data_insn(env
, s
, insn
))
6559 if ((insn
& 0x0f100000) == 0x04000000) {
6560 /* NEON load/store. */
6561 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6564 if (disas_neon_ls_insn(env
, s
, insn
))
6568 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6569 ((insn
& 0x0f30f010) == 0x0710f000)) {
6570 if ((insn
& (1 << 22)) == 0) {
6572 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6576 /* Otherwise PLD; v5TE+ */
6580 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6581 ((insn
& 0x0f70f010) == 0x0650f000)) {
6583 return; /* PLI; V7 */
6585 if (((insn
& 0x0f700000) == 0x04100000) ||
6586 ((insn
& 0x0f700010) == 0x06100000)) {
6587 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6590 return; /* v7MP: Unallocated memory hint: must NOP */
6593 if ((insn
& 0x0ffffdff) == 0x01010000) {
6596 if (((insn
>> 9) & 1) != s
->bswap_code
) {
6597 /* Dynamic endianness switching not implemented. */
6601 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6602 switch ((insn
>> 4) & 0xf) {
6611 /* We don't emulate caches so these are a no-op. */
6616 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6622 op1
= (insn
& 0x1f);
6623 addr
= tcg_temp_new_i32();
6624 tmp
= tcg_const_i32(op1
);
6625 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6626 tcg_temp_free_i32(tmp
);
6627 i
= (insn
>> 23) & 3;
6629 case 0: offset
= -4; break; /* DA */
6630 case 1: offset
= 0; break; /* IA */
6631 case 2: offset
= -8; break; /* DB */
6632 case 3: offset
= 4; break; /* IB */
6636 tcg_gen_addi_i32(addr
, addr
, offset
);
6637 tmp
= load_reg(s
, 14);
6638 gen_st32(tmp
, addr
, 0);
6639 tmp
= load_cpu_field(spsr
);
6640 tcg_gen_addi_i32(addr
, addr
, 4);
6641 gen_st32(tmp
, addr
, 0);
6642 if (insn
& (1 << 21)) {
6643 /* Base writeback. */
6645 case 0: offset
= -8; break;
6646 case 1: offset
= 4; break;
6647 case 2: offset
= -4; break;
6648 case 3: offset
= 0; break;
6652 tcg_gen_addi_i32(addr
, addr
, offset
);
6653 tmp
= tcg_const_i32(op1
);
6654 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6655 tcg_temp_free_i32(tmp
);
6656 tcg_temp_free_i32(addr
);
6658 tcg_temp_free_i32(addr
);
6661 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6667 rn
= (insn
>> 16) & 0xf;
6668 addr
= load_reg(s
, rn
);
6669 i
= (insn
>> 23) & 3;
6671 case 0: offset
= -4; break; /* DA */
6672 case 1: offset
= 0; break; /* IA */
6673 case 2: offset
= -8; break; /* DB */
6674 case 3: offset
= 4; break; /* IB */
6678 tcg_gen_addi_i32(addr
, addr
, offset
);
6679 /* Load PC into tmp and CPSR into tmp2. */
6680 tmp
= gen_ld32(addr
, 0);
6681 tcg_gen_addi_i32(addr
, addr
, 4);
6682 tmp2
= gen_ld32(addr
, 0);
6683 if (insn
& (1 << 21)) {
6684 /* Base writeback. */
6686 case 0: offset
= -8; break;
6687 case 1: offset
= 4; break;
6688 case 2: offset
= -4; break;
6689 case 3: offset
= 0; break;
6693 tcg_gen_addi_i32(addr
, addr
, offset
);
6694 store_reg(s
, rn
, addr
);
6696 tcg_temp_free_i32(addr
);
6698 gen_rfe(s
, tmp
, tmp2
);
6700 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6701 /* branch link and change to thumb (blx <offset>) */
6704 val
= (uint32_t)s
->pc
;
6705 tmp
= tcg_temp_new_i32();
6706 tcg_gen_movi_i32(tmp
, val
);
6707 store_reg(s
, 14, tmp
);
6708 /* Sign-extend the 24-bit offset */
6709 offset
= (((int32_t)insn
) << 8) >> 8;
6710 /* offset * 4 + bit24 * 2 + (thumb bit) */
6711 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
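            /* Note (added): bit 24 (the H bit) of BLX <imm> supplies the extra halfword
             * of target offset, and the low bit set here marks the destination as Thumb
             * code for the branch generated below. */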
6712 /* pipeline offset */
6714 /* protected by ARCH(5); above, near the start of uncond block */
6717 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6718 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6719 /* iWMMXt register transfer. */
6720 if (env
->cp15
.c15_cpar
& (1 << 1))
6721 if (!disas_iwmmxt_insn(env
, s
, insn
))
6724 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6725 /* Coprocessor double register transfer. */
6727 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6728 /* Additional coprocessor register transfer. */
6729 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6732 /* cps (privileged) */
6736 if (insn
& (1 << 19)) {
6737 if (insn
& (1 << 8))
6739 if (insn
& (1 << 7))
6741 if (insn
& (1 << 6))
6743 if (insn
& (1 << 18))
6746 if (insn
& (1 << 17)) {
6748 val
|= (insn
& 0x1f);
6751 gen_set_psr_im(s
, mask
, 0, val
);
6758 /* if not always execute, we generate a conditional jump to
6760 s
->condlabel
= gen_new_label();
6761 gen_test_cc(cond
^ 1, s
->condlabel
);
6764 if ((insn
& 0x0f900000) == 0x03000000) {
6765 if ((insn
& (1 << 21)) == 0) {
6767 rd
= (insn
>> 12) & 0xf;
6768 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6769 if ((insn
& (1 << 22)) == 0) {
6771 tmp
= tcg_temp_new_i32();
6772 tcg_gen_movi_i32(tmp
, val
);
6775 tmp
= load_reg(s
, rd
);
6776 tcg_gen_ext16u_i32(tmp
, tmp
);
6777 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6779 store_reg(s
, rd
, tmp
);
6781 if (((insn
>> 12) & 0xf) != 0xf)
6783 if (((insn
>> 16) & 0xf) == 0) {
6784 gen_nop_hint(s
, insn
& 0xff);
6786 /* CPSR = immediate */
6788 shift
= ((insn
>> 8) & 0xf) * 2;
6790 val
= (val
>> shift
) | (val
<< (32 - shift
));
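            /* Note (added): this is the standard ARM immediate rotation, i.e. the 8-bit
             * value rotated right by twice the 4-bit rotate field; e.g. imm8 = 0xff with
             * rotate = 4 gives shift = 8 and val = 0xff000000. */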
6791 i
= ((insn
& (1 << 22)) != 0);
6792 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6796 } else if ((insn
& 0x0f900000) == 0x01000000
6797 && (insn
& 0x00000090) != 0x00000090) {
6798 /* miscellaneous instructions */
6799 op1
= (insn
>> 21) & 3;
6800 sh
= (insn
>> 4) & 0xf;
6803 case 0x0: /* move program status register */
6806 tmp
= load_reg(s
, rm
);
6807 i
= ((op1
& 2) != 0);
6808 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6812 rd
= (insn
>> 12) & 0xf;
6816 tmp
= load_cpu_field(spsr
);
6818 tmp
= tcg_temp_new_i32();
6819 gen_helper_cpsr_read(tmp
);
6821 store_reg(s
, rd
, tmp
);
6826 /* branch/exchange thumb (bx). */
6828 tmp
= load_reg(s
, rm
);
6830 } else if (op1
== 3) {
6833 rd
= (insn
>> 12) & 0xf;
6834 tmp
= load_reg(s
, rm
);
6835 gen_helper_clz(tmp
, tmp
);
6836 store_reg(s
, rd
, tmp
);
6844 /* Trivial implementation equivalent to bx. */
6845 tmp
= load_reg(s
, rm
);
6856 /* branch link/exchange thumb (blx) */
6857 tmp
= load_reg(s
, rm
);
6858 tmp2
= tcg_temp_new_i32();
6859 tcg_gen_movi_i32(tmp2
, s
->pc
);
6860 store_reg(s
, 14, tmp2
);
6863 case 0x5: /* saturating add/subtract */
6865 rd
= (insn
>> 12) & 0xf;
6866 rn
= (insn
>> 16) & 0xf;
6867 tmp
= load_reg(s
, rm
);
6868 tmp2
= load_reg(s
, rn
);
6870 gen_helper_double_saturate(tmp2
, tmp2
);
6872 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6874 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6875 tcg_temp_free_i32(tmp2
);
6876 store_reg(s
, rd
, tmp
);
6879 /* SMC instruction (op1 == 3)
6880 and undefined instructions (op1 == 0 || op1 == 2)
6887 gen_exception_insn(s
, 4, EXCP_BKPT
);
6889 case 0x8: /* signed multiply */
6894 rs
= (insn
>> 8) & 0xf;
6895 rn
= (insn
>> 12) & 0xf;
6896 rd
= (insn
>> 16) & 0xf;
6898 /* (32 * 16) >> 16 */
6899 tmp
= load_reg(s
, rm
);
6900 tmp2
= load_reg(s
, rs
);
6902 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6905 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6906 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6907 tmp
= tcg_temp_new_i32();
6908 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6909 tcg_temp_free_i64(tmp64
);
6910 if ((sh
& 2) == 0) {
6911 tmp2
= load_reg(s
, rn
);
6912 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6913 tcg_temp_free_i32(tmp2
);
6915 store_reg(s
, rd
, tmp
);
6918 tmp
= load_reg(s
, rm
);
6919 tmp2
= load_reg(s
, rs
);
6920 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6921 tcg_temp_free_i32(tmp2
);
6923 tmp64
= tcg_temp_new_i64();
6924 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6925 tcg_temp_free_i32(tmp
);
6926 gen_addq(s
, tmp64
, rn
, rd
);
6927 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6928 tcg_temp_free_i64(tmp64
);
6931 tmp2
= load_reg(s
, rn
);
6932 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6933 tcg_temp_free_i32(tmp2
);
6935 store_reg(s
, rd
, tmp
);
6942 } else if (((insn
& 0x0e000000) == 0 &&
6943 (insn
& 0x00000090) != 0x90) ||
6944 ((insn
& 0x0e000000) == (1 << 25))) {
6945 int set_cc
, logic_cc
, shiftop
;
6947 op1
= (insn
>> 21) & 0xf;
6948 set_cc
= (insn
>> 20) & 1;
6949 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6951 /* data processing instruction */
6952 if (insn
& (1 << 25)) {
6953 /* immediate operand */
6955 shift
= ((insn
>> 8) & 0xf) * 2;
6957 val
= (val
>> shift
) | (val
<< (32 - shift
));
6959 tmp2
= tcg_temp_new_i32();
6960 tcg_gen_movi_i32(tmp2
, val
);
6961 if (logic_cc
&& shift
) {
6962 gen_set_CF_bit31(tmp2
);
6967 tmp2
= load_reg(s
, rm
);
6968 shiftop
= (insn
>> 5) & 3;
6969 if (!(insn
& (1 << 4))) {
6970 shift
= (insn
>> 7) & 0x1f;
6971 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6973 rs
= (insn
>> 8) & 0xf;
6974 tmp
= load_reg(s
, rs
);
6975 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6978 if (op1
!= 0x0f && op1
!= 0x0d) {
6979 rn
= (insn
>> 16) & 0xf;
6980 tmp
= load_reg(s
, rn
);
6984 rd
= (insn
>> 12) & 0xf;
6987 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6991 store_reg_bx(env
, s
, rd
, tmp
);
6994 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6998 store_reg_bx(env
, s
, rd
, tmp
);
7001 if (set_cc
&& rd
== 15) {
7002 /* SUBS r15, ... is used for exception return. */
7006 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7007 gen_exception_return(s
, tmp
);
7010 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7012 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7014 store_reg_bx(env
, s
, rd
, tmp
);
7019 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
7021 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7023 store_reg_bx(env
, s
, rd
, tmp
);
7027 gen_helper_add_cc(tmp
, tmp
, tmp2
);
7029 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7031 store_reg_bx(env
, s
, rd
, tmp
);
7035 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
7037 gen_add_carry(tmp
, tmp
, tmp2
);
7039 store_reg_bx(env
, s
, rd
, tmp
);
7043 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
7045 gen_sub_carry(tmp
, tmp
, tmp2
);
7047 store_reg_bx(env
, s
, rd
, tmp
);
7051 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
7053 gen_sub_carry(tmp
, tmp2
, tmp
);
7055 store_reg_bx(env
, s
, rd
, tmp
);
7059 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7062 tcg_temp_free_i32(tmp
);
7066 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7069 tcg_temp_free_i32(tmp
);
7073 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7075 tcg_temp_free_i32(tmp
);
7079 gen_helper_add_cc(tmp
, tmp
, tmp2
);
7081 tcg_temp_free_i32(tmp
);
7084 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7088 store_reg_bx(env
, s
, rd
, tmp
);
7091 if (logic_cc
&& rd
== 15) {
7092 /* MOVS r15, ... is used for exception return. */
7096 gen_exception_return(s
, tmp2
);
7101 store_reg_bx(env
, s
, rd
, tmp2
);
7105 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
7109 store_reg_bx(env
, s
, rd
, tmp
);
7113 tcg_gen_not_i32(tmp2
, tmp2
);
7117 store_reg_bx(env
, s
, rd
, tmp2
);
7120 if (op1
!= 0x0f && op1
!= 0x0d) {
7121 tcg_temp_free_i32(tmp2
);
7124 /* other instructions */
7125 op1
= (insn
>> 24) & 0xf;
7129 /* multiplies, extra load/stores */
7130 sh
= (insn
>> 5) & 3;
7133 rd
= (insn
>> 16) & 0xf;
7134 rn
= (insn
>> 12) & 0xf;
7135 rs
= (insn
>> 8) & 0xf;
7137 op1
= (insn
>> 20) & 0xf;
7139 case 0: case 1: case 2: case 3: case 6:
7141 tmp
= load_reg(s
, rs
);
7142 tmp2
= load_reg(s
, rm
);
7143 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7144 tcg_temp_free_i32(tmp2
);
7145 if (insn
& (1 << 22)) {
7146 /* Subtract (mls) */
7148 tmp2
= load_reg(s
, rn
);
7149 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7150 tcg_temp_free_i32(tmp2
);
7151 } else if (insn
& (1 << 21)) {
7153 tmp2
= load_reg(s
, rn
);
7154 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7155 tcg_temp_free_i32(tmp2
);
7157 if (insn
& (1 << 20))
7159 store_reg(s
, rd
, tmp
);
7162 /* 64 bit mul double accumulate (UMAAL) */
7164 tmp
= load_reg(s
, rs
);
7165 tmp2
= load_reg(s
, rm
);
7166 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7167 gen_addq_lo(s
, tmp64
, rn
);
7168 gen_addq_lo(s
, tmp64
, rd
);
7169 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7170 tcg_temp_free_i64(tmp64
);
7172 case 8: case 9: case 10: case 11:
7173 case 12: case 13: case 14: case 15:
7174 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7175 tmp
= load_reg(s
, rs
);
7176 tmp2
= load_reg(s
, rm
);
7177 if (insn
& (1 << 22)) {
7178 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7180 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7182 if (insn
& (1 << 21)) { /* mult accumulate */
7183 gen_addq(s
, tmp64
, rn
, rd
);
7185 if (insn
& (1 << 20)) {
7186 gen_logicq_cc(tmp64
);
7188 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7189 tcg_temp_free_i64(tmp64
);
7195 rn
= (insn
>> 16) & 0xf;
7196 rd
= (insn
>> 12) & 0xf;
7197 if (insn
& (1 << 23)) {
7198 /* load/store exclusive */
7199 op1
= (insn
>> 21) & 0x3;
7204 addr
= tcg_temp_local_new_i32();
7205 load_reg_var(s
, addr
, rn
);
7206 if (insn
& (1 << 20)) {
7209 gen_load_exclusive(s
, rd
, 15, addr
, 2);
7211 case 1: /* ldrexd */
7212 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
7214 case 2: /* ldrexb */
7215 gen_load_exclusive(s
, rd
, 15, addr
, 0);
7217 case 3: /* ldrexh */
7218 gen_load_exclusive(s
, rd
, 15, addr
, 1);
7227 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
7229 case 1: /* strexd */
7230 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
7232 case 2: /* strexb */
7233 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
7235 case 3: /* strexh */
7236 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
7242 tcg_temp_free(addr
);
7244 /* SWP instruction */
7247 /* ??? This is not really atomic. However we know
7248 we never have multiple CPUs running in parallel,
7249 so it is good enough. */
7250 addr
= load_reg(s
, rn
);
7251 tmp
= load_reg(s
, rm
);
7252 if (insn
& (1 << 22)) {
7253 tmp2
= gen_ld8u(addr
, IS_USER(s
));
7254 gen_st8(tmp
, addr
, IS_USER(s
));
7256 tmp2
= gen_ld32(addr
, IS_USER(s
));
7257 gen_st32(tmp
, addr
, IS_USER(s
));
7259 tcg_temp_free_i32(addr
);
7260 store_reg(s
, rd
, tmp2
);
7266 /* Misc load/store */
7267 rn
= (insn
>> 16) & 0xf;
7268 rd
= (insn
>> 12) & 0xf;
7269 addr
= load_reg(s
, rn
);
7270 if (insn
& (1 << 24))
7271 gen_add_datah_offset(s
, insn
, 0, addr
);
7273 if (insn
& (1 << 20)) {
7277 tmp
= gen_ld16u(addr
, IS_USER(s
));
7280 tmp
= gen_ld8s(addr
, IS_USER(s
));
7284 tmp
= gen_ld16s(addr
, IS_USER(s
));
7288 } else if (sh
& 2) {
7293 tmp
= load_reg(s
, rd
);
7294 gen_st32(tmp
, addr
, IS_USER(s
));
7295 tcg_gen_addi_i32(addr
, addr
, 4);
7296 tmp
= load_reg(s
, rd
+ 1);
7297 gen_st32(tmp
, addr
, IS_USER(s
));
7301 tmp
= gen_ld32(addr
, IS_USER(s
));
7302 store_reg(s
, rd
, tmp
);
7303 tcg_gen_addi_i32(addr
, addr
, 4);
7304 tmp
= gen_ld32(addr
, IS_USER(s
));
7308 address_offset
= -4;
7311 tmp
= load_reg(s
, rd
);
7312 gen_st16(tmp
, addr
, IS_USER(s
));
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
7319 if (!(insn
& (1 << 24))) {
7320 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7321 store_reg(s
, rn
, addr
);
7322 } else if (insn
& (1 << 21)) {
7324 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7325 store_reg(s
, rn
, addr
);
7327 tcg_temp_free_i32(addr
);
7330 /* Complete the load. */
7331 store_reg(s
, rd
, tmp
);
7340 if (insn
& (1 << 4)) {
7342 /* Armv6 Media instructions. */
7344 rn
= (insn
>> 16) & 0xf;
7345 rd
= (insn
>> 12) & 0xf;
7346 rs
= (insn
>> 8) & 0xf;
7347 switch ((insn
>> 23) & 3) {
7348 case 0: /* Parallel add/subtract. */
7349 op1
= (insn
>> 20) & 7;
7350 tmp
= load_reg(s
, rn
);
7351 tmp2
= load_reg(s
, rm
);
7352 sh
= (insn
>> 5) & 7;
7353 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7355 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7356 tcg_temp_free_i32(tmp2
);
7357 store_reg(s
, rd
, tmp
);
7360 if ((insn
& 0x00700020) == 0) {
7361 /* Halfword pack. */
7362 tmp
= load_reg(s
, rn
);
7363 tmp2
= load_reg(s
, rm
);
7364 shift
= (insn
>> 7) & 0x1f;
7365 if (insn
& (1 << 6)) {
7369 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7370 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7371 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7375 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7376 tcg_gen_ext16u_i32(tmp
, tmp
);
7377 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7379 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7380 tcg_temp_free_i32(tmp2
);
7381 store_reg(s
, rd
, tmp
);
7382 } else if ((insn
& 0x00200020) == 0x00200000) {
7384 tmp
= load_reg(s
, rm
);
7385 shift
= (insn
>> 7) & 0x1f;
7386 if (insn
& (1 << 6)) {
7389 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7391 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7393 sh
= (insn
>> 16) & 0x1f;
7394 tmp2
= tcg_const_i32(sh
);
7395 if (insn
& (1 << 22))
7396 gen_helper_usat(tmp
, tmp
, tmp2
);
7398 gen_helper_ssat(tmp
, tmp
, tmp2
);
7399 tcg_temp_free_i32(tmp2
);
7400 store_reg(s
, rd
, tmp
);
7401 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7403 tmp
= load_reg(s
, rm
);
7404 sh
= (insn
>> 16) & 0x1f;
7405 tmp2
= tcg_const_i32(sh
);
7406 if (insn
& (1 << 22))
7407 gen_helper_usat16(tmp
, tmp
, tmp2
);
7409 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7410 tcg_temp_free_i32(tmp2
);
7411 store_reg(s
, rd
, tmp
);
7412 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7414 tmp
= load_reg(s
, rn
);
7415 tmp2
= load_reg(s
, rm
);
7416 tmp3
= tcg_temp_new_i32();
7417 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
7418 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7419 tcg_temp_free_i32(tmp3
);
7420 tcg_temp_free_i32(tmp2
);
7421 store_reg(s
, rd
, tmp
);
7422 } else if ((insn
& 0x000003e0) == 0x00000060) {
7423 tmp
= load_reg(s
, rm
);
7424 shift
= (insn
>> 10) & 3;
7425 /* ??? In many cases it's not necessary to do a
7426 rotate, a shift is sufficient. */
7428 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7429 op1
= (insn
>> 20) & 7;
7431 case 0: gen_sxtb16(tmp
); break;
7432 case 2: gen_sxtb(tmp
); break;
7433 case 3: gen_sxth(tmp
); break;
7434 case 4: gen_uxtb16(tmp
); break;
7435 case 6: gen_uxtb(tmp
); break;
7436 case 7: gen_uxth(tmp
); break;
7437 default: goto illegal_op
;
7440 tmp2
= load_reg(s
, rn
);
7441 if ((op1
& 3) == 0) {
7442 gen_add16(tmp
, tmp2
);
7444 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7445 tcg_temp_free_i32(tmp2
);
7448 store_reg(s
, rd
, tmp
);
7449 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7451 tmp
= load_reg(s
, rm
);
7452 if (insn
& (1 << 22)) {
7453 if (insn
& (1 << 7)) {
7457 gen_helper_rbit(tmp
, tmp
);
7460 if (insn
& (1 << 7))
7463 tcg_gen_bswap32_i32(tmp
, tmp
);
7465 store_reg(s
, rd
, tmp
);
7470 case 2: /* Multiplies (Type 3). */
7471 switch ((insn
>> 20) & 0x7) {
7473 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
7474 /* op2 not 00x or 11x : UNDEF */
7477 /* Signed multiply most significant [accumulate].
7478 (SMMUL, SMMLA, SMMLS) */
7479 tmp
= load_reg(s
, rm
);
7480 tmp2
= load_reg(s
, rs
);
7481 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7484 tmp
= load_reg(s
, rd
);
7485 if (insn
& (1 << 6)) {
7486 tmp64
= gen_subq_msw(tmp64
, tmp
);
7488 tmp64
= gen_addq_msw(tmp64
, tmp
);
7491 if (insn
& (1 << 5)) {
7492 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7494 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7495 tmp
= tcg_temp_new_i32();
7496 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7497 tcg_temp_free_i64(tmp64
);
7498 store_reg(s
, rn
, tmp
);
7502 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7503 if (insn
& (1 << 7)) {
7506 tmp
= load_reg(s
, rm
);
7507 tmp2
= load_reg(s
, rs
);
7508 if (insn
& (1 << 5))
7509 gen_swap_half(tmp2
);
7510 gen_smul_dual(tmp
, tmp2
);
7511 if (insn
& (1 << 6)) {
7512 /* This subtraction cannot overflow. */
7513 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7515 /* This addition cannot overflow 32 bits;
7516 * however it may overflow considered as a signed
7517 * operation, in which case we must set the Q flag.
7519 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7521 tcg_temp_free_i32(tmp2
);
7522 if (insn
& (1 << 22)) {
7523 /* smlald, smlsld */
7524 tmp64
= tcg_temp_new_i64();
7525 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7526 tcg_temp_free_i32(tmp
);
7527 gen_addq(s
, tmp64
, rd
, rn
);
7528 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7529 tcg_temp_free_i64(tmp64
);
7531 /* smuad, smusd, smlad, smlsd */
7534 tmp2
= load_reg(s
, rd
);
7535 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7536 tcg_temp_free_i32(tmp2
);
7538 store_reg(s
, rn
, tmp
);
7544 if (!arm_feature(env
, ARM_FEATURE_ARM_DIV
)) {
7547 if (((insn
>> 5) & 7) || (rd
!= 15)) {
7550 tmp
= load_reg(s
, rm
);
7551 tmp2
= load_reg(s
, rs
);
7552 if (insn
& (1 << 21)) {
7553 gen_helper_udiv(tmp
, tmp
, tmp2
);
7555 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7557 tcg_temp_free_i32(tmp2
);
7558 store_reg(s
, rn
, tmp
);
7565 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7567 case 0: /* Unsigned sum of absolute differences. */
7569 tmp
= load_reg(s
, rm
);
7570 tmp2
= load_reg(s
, rs
);
7571 gen_helper_usad8(tmp
, tmp
, tmp2
);
7572 tcg_temp_free_i32(tmp2
);
7574 tmp2
= load_reg(s
, rd
);
7575 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7576 tcg_temp_free_i32(tmp2
);
7578 store_reg(s
, rn
, tmp
);
7580 case 0x20: case 0x24: case 0x28: case 0x2c:
7581 /* Bitfield insert/clear. */
7583 shift
= (insn
>> 7) & 0x1f;
7584 i
= (insn
>> 16) & 0x1f;
7587 tmp
= tcg_temp_new_i32();
7588 tcg_gen_movi_i32(tmp
, 0);
7590 tmp
= load_reg(s
, rm
);
7593 tmp2
= load_reg(s
, rd
);
7594 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7595 tcg_temp_free_i32(tmp2
);
7597 store_reg(s
, rd
, tmp
);
7599 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7600 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7602 tmp
= load_reg(s
, rm
);
7603 shift
= (insn
>> 7) & 0x1f;
7604 i
= ((insn
>> 16) & 0x1f) + 1;
7609 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7611 gen_sbfx(tmp
, shift
, i
);
7614 store_reg(s
, rd
, tmp
);
7624 /* Check for undefined extension instructions
7625 * per the ARM Bible IE:
7626 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7628 sh
= (0xf << 20) | (0xf << 4);
7629 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7633 /* load/store byte/word */
7634 rn
= (insn
>> 16) & 0xf;
7635 rd
= (insn
>> 12) & 0xf;
7636 tmp2
= load_reg(s
, rn
);
7637 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7638 if (insn
& (1 << 24))
7639 gen_add_data_offset(s
, insn
, tmp2
);
7640 if (insn
& (1 << 20)) {
7642 if (insn
& (1 << 22)) {
7643 tmp
= gen_ld8u(tmp2
, i
);
7645 tmp
= gen_ld32(tmp2
, i
);
7649 tmp
= load_reg(s
, rd
);
7650 if (insn
& (1 << 22))
7651 gen_st8(tmp
, tmp2
, i
);
7653 gen_st32(tmp
, tmp2
, i
);
7655 if (!(insn
& (1 << 24))) {
7656 gen_add_data_offset(s
, insn
, tmp2
);
7657 store_reg(s
, rn
, tmp2
);
7658 } else if (insn
& (1 << 21)) {
7659 store_reg(s
, rn
, tmp2
);
7661 tcg_temp_free_i32(tmp2
);
7663 if (insn
& (1 << 20)) {
7664 /* Complete the load. */
7665 store_reg_from_load(env
, s
, rd
, tmp
);
7671 int j
, n
, user
, loaded_base
;
7673 /* load/store multiple words */
7674 /* XXX: store correct base if write back */
7676 if (insn
& (1 << 22)) {
7678 goto illegal_op
; /* only usable in supervisor mode */
7680 if ((insn
& (1 << 15)) == 0)
7683 rn
= (insn
>> 16) & 0xf;
7684 addr
= load_reg(s
, rn
);
7686 /* compute total size */
7688 TCGV_UNUSED(loaded_var
);
7691 if (insn
& (1 << i
))
7694 /* XXX: test invalid n == 0 case ? */
7695 if (insn
& (1 << 23)) {
7696 if (insn
& (1 << 24)) {
7698 tcg_gen_addi_i32(addr
, addr
, 4);
7700 /* post increment */
7703 if (insn
& (1 << 24)) {
7705 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7707 /* post decrement */
7709 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7714 if (insn
& (1 << i
)) {
7715 if (insn
& (1 << 20)) {
7717 tmp
= gen_ld32(addr
, IS_USER(s
));
7719 tmp2
= tcg_const_i32(i
);
7720 gen_helper_set_user_reg(tmp2
, tmp
);
7721 tcg_temp_free_i32(tmp2
);
7722 tcg_temp_free_i32(tmp
);
7723 } else if (i
== rn
) {
7727 store_reg_from_load(env
, s
, i
, tmp
);
7732 /* special case: r15 = PC + 8 */
7733 val
= (long)s
->pc
+ 4;
7734 tmp
= tcg_temp_new_i32();
7735 tcg_gen_movi_i32(tmp
, val
);
7737 tmp
= tcg_temp_new_i32();
7738 tmp2
= tcg_const_i32(i
);
7739 gen_helper_get_user_reg(tmp
, tmp2
);
7740 tcg_temp_free_i32(tmp2
);
7742 tmp
= load_reg(s
, i
);
7744 gen_st32(tmp
, addr
, IS_USER(s
));
7747 /* no need to add after the last transfer */
7749 tcg_gen_addi_i32(addr
, addr
, 4);
7752 if (insn
& (1 << 21)) {
7754 if (insn
& (1 << 23)) {
7755 if (insn
& (1 << 24)) {
7758 /* post increment */
7759 tcg_gen_addi_i32(addr
, addr
, 4);
7762 if (insn
& (1 << 24)) {
7765 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7767 /* post decrement */
7768 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7771 store_reg(s
, rn
, addr
);
7773 tcg_temp_free_i32(addr
);
7776 store_reg(s
, rn
, loaded_var
);
7778 if ((insn
& (1 << 22)) && !user
) {
7779 /* Restore CPSR from SPSR. */
7780 tmp
= load_cpu_field(spsr
);
7781 gen_set_cpsr(tmp
, 0xffffffff);
7782 tcg_temp_free_i32(tmp
);
7783 s
->is_jmp
= DISAS_UPDATE
;
7792 /* branch (and link) */
7793 val
= (int32_t)s
->pc
;
7794 if (insn
& (1 << 24)) {
7795 tmp
= tcg_temp_new_i32();
7796 tcg_gen_movi_i32(tmp
, val
);
7797 store_reg(s
, 14, tmp
);
7799 offset
= (((int32_t)insn
<< 8) >> 8);
7800 val
+= (offset
<< 2) + 4;
7808 if (disas_coproc_insn(env
, s
, insn
))
7813 gen_set_pc_im(s
->pc
);
7814 s
->is_jmp
= DISAS_SWI
;
7818 gen_exception_insn(s
, 4, EXCP_UDEF
);
7824 /* Return true if this is a Thumb-2 logical op. */
7826 thumb2_logic_op(int op
)
7831 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7832 then set condition code flags based on the result of the operation.
7833 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7834 to the high bit of T1.
7835 Returns zero if the opcode is valid. */
7838 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7845 tcg_gen_and_i32(t0
, t0
, t1
);
7849 tcg_gen_andc_i32(t0
, t0
, t1
);
7853 tcg_gen_or_i32(t0
, t0
, t1
);
7857 tcg_gen_orc_i32(t0
, t0
, t1
);
7861 tcg_gen_xor_i32(t0
, t0
, t1
);
7866 gen_helper_add_cc(t0
, t0
, t1
);
7868 tcg_gen_add_i32(t0
, t0
, t1
);
7872 gen_helper_adc_cc(t0
, t0
, t1
);
7878 gen_helper_sbc_cc(t0
, t0
, t1
);
7880 gen_sub_carry(t0
, t0
, t1
);
7884 gen_helper_sub_cc(t0
, t0
, t1
);
7886 tcg_gen_sub_i32(t0
, t0
, t1
);
7890 gen_helper_sub_cc(t0
, t1
, t0
);
7892 tcg_gen_sub_i32(t0
, t1
, t0
);
7894 default: /* 5, 6, 7, 9, 12, 15. */
7900 gen_set_CF_bit31(t1
);
7905 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7907 static int disas_thumb2_insn(CPUARMState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7909 uint32_t insn
, imm
, shift
, offset
;
7910 uint32_t rd
, rn
, rm
, rs
;
7921 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7922 || arm_feature (env
, ARM_FEATURE_M
))) {
7923 /* Thumb-1 cores may need to treat bl and blx as a pair of
7924 16-bit instructions to get correct prefetch abort behavior. */
7926 if ((insn
& (1 << 12)) == 0) {
7928 /* Second half of blx. */
7929 offset
= ((insn
& 0x7ff) << 1);
7930 tmp
= load_reg(s
, 14);
7931 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7932 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7934 tmp2
= tcg_temp_new_i32();
7935 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7936 store_reg(s
, 14, tmp2
);
7940 if (insn
& (1 << 11)) {
7941 /* Second half of bl. */
7942 offset
= ((insn
& 0x7ff) << 1) | 1;
7943 tmp
= load_reg(s
, 14);
7944 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7946 tmp2
= tcg_temp_new_i32();
7947 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7948 store_reg(s
, 14, tmp2
);
7952 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7953 /* Instruction spans a page boundary. Implement it as two
7954 16-bit instructions in case the second half causes an
7956 offset
= ((int32_t)insn
<< 21) >> 9;
7957 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7960 /* Fall through to 32-bit decode. */
7963 insn
= arm_lduw_code(s
->pc
, s
->bswap_code
);
7965 insn
|= (uint32_t)insn_hw1
<< 16;
7967 if ((insn
& 0xf800e800) != 0xf000e800) {
7971 rn
= (insn
>> 16) & 0xf;
7972 rs
= (insn
>> 12) & 0xf;
7973 rd
= (insn
>> 8) & 0xf;
7975 switch ((insn
>> 25) & 0xf) {
7976 case 0: case 1: case 2: case 3:
7977 /* 16-bit instructions. Should never happen. */
7980 if (insn
& (1 << 22)) {
7981 /* Other load/store, table branch. */
7982 if (insn
& 0x01200000) {
7983 /* Load/store doubleword. */
7985 addr
= tcg_temp_new_i32();
7986 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7988 addr
= load_reg(s
, rn
);
7990 offset
= (insn
& 0xff) * 4;
7991 if ((insn
& (1 << 23)) == 0)
7993 if (insn
& (1 << 24)) {
7994 tcg_gen_addi_i32(addr
, addr
, offset
);
7997 if (insn
& (1 << 20)) {
7999 tmp
= gen_ld32(addr
, IS_USER(s
));
8000 store_reg(s
, rs
, tmp
);
8001 tcg_gen_addi_i32(addr
, addr
, 4);
8002 tmp
= gen_ld32(addr
, IS_USER(s
));
8003 store_reg(s
, rd
, tmp
);
8006 tmp
= load_reg(s
, rs
);
8007 gen_st32(tmp
, addr
, IS_USER(s
));
8008 tcg_gen_addi_i32(addr
, addr
, 4);
8009 tmp
= load_reg(s
, rd
);
8010 gen_st32(tmp
, addr
, IS_USER(s
));
8012 if (insn
& (1 << 21)) {
8013 /* Base writeback. */
8016 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
8017 store_reg(s
, rn
, addr
);
8019 tcg_temp_free_i32(addr
);
8021 } else if ((insn
& (1 << 23)) == 0) {
8022 /* Load/store exclusive word. */
8023 addr
= tcg_temp_local_new();
8024 load_reg_var(s
, addr
, rn
);
8025 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
8026 if (insn
& (1 << 20)) {
8027 gen_load_exclusive(s
, rs
, 15, addr
, 2);
8029 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
8031 tcg_temp_free(addr
);
8032 } else if ((insn
& (1 << 6)) == 0) {
8035 addr
= tcg_temp_new_i32();
8036 tcg_gen_movi_i32(addr
, s
->pc
);
8038 addr
= load_reg(s
, rn
);
8040 tmp
= load_reg(s
, rm
);
8041 tcg_gen_add_i32(addr
, addr
, tmp
);
8042 if (insn
& (1 << 4)) {
8044 tcg_gen_add_i32(addr
, addr
, tmp
);
8045 tcg_temp_free_i32(tmp
);
8046 tmp
= gen_ld16u(addr
, IS_USER(s
));
8048 tcg_temp_free_i32(tmp
);
8049 tmp
= gen_ld8u(addr
, IS_USER(s
));
8051 tcg_temp_free_i32(addr
);
8052 tcg_gen_shli_i32(tmp
, tmp
, 1);
8053 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
8054 store_reg(s
, 15, tmp
);
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
        } else {
8072 /* Load/store multiple, RFE, SRS. */
8073 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
8074 /* Not available in user mode. */
8077 if (insn
& (1 << 20)) {
8079 addr
= load_reg(s
, rn
);
8080 if ((insn
& (1 << 24)) == 0)
8081 tcg_gen_addi_i32(addr
, addr
, -8);
8082 /* Load PC into tmp and CPSR into tmp2. */
8083 tmp
= gen_ld32(addr
, 0);
8084 tcg_gen_addi_i32(addr
, addr
, 4);
8085 tmp2
= gen_ld32(addr
, 0);
8086 if (insn
& (1 << 21)) {
8087 /* Base writeback. */
8088 if (insn
& (1 << 24)) {
8089 tcg_gen_addi_i32(addr
, addr
, 4);
8091 tcg_gen_addi_i32(addr
, addr
, -4);
8093 store_reg(s
, rn
, addr
);
8095 tcg_temp_free_i32(addr
);
8097 gen_rfe(s
, tmp
, tmp2
);
8101 addr
= tcg_temp_new_i32();
8102 tmp
= tcg_const_i32(op
);
8103 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
8104 tcg_temp_free_i32(tmp
);
8105 if ((insn
& (1 << 24)) == 0) {
8106 tcg_gen_addi_i32(addr
, addr
, -8);
8108 tmp
= load_reg(s
, 14);
8109 gen_st32(tmp
, addr
, 0);
8110 tcg_gen_addi_i32(addr
, addr
, 4);
8111 tmp
= tcg_temp_new_i32();
8112 gen_helper_cpsr_read(tmp
);
8113 gen_st32(tmp
, addr
, 0);
8114 if (insn
& (1 << 21)) {
8115 if ((insn
& (1 << 24)) == 0) {
8116 tcg_gen_addi_i32(addr
, addr
, -4);
8118 tcg_gen_addi_i32(addr
, addr
, 4);
8120 tmp
= tcg_const_i32(op
);
8121 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
8122 tcg_temp_free_i32(tmp
);
8124 tcg_temp_free_i32(addr
);
8128 int i
, loaded_base
= 0;
8130 /* Load/store multiple. */
8131 addr
= load_reg(s
, rn
);
8133 for (i
= 0; i
< 16; i
++) {
8134 if (insn
& (1 << i
))
8137 if (insn
& (1 << 24)) {
8138 tcg_gen_addi_i32(addr
, addr
, -offset
);
8141 TCGV_UNUSED(loaded_var
);
8142 for (i
= 0; i
< 16; i
++) {
8143 if ((insn
& (1 << i
)) == 0)
8145 if (insn
& (1 << 20)) {
8147 tmp
= gen_ld32(addr
, IS_USER(s
));
8150 } else if (i
== rn
) {
8154 store_reg(s
, i
, tmp
);
8158 tmp
= load_reg(s
, i
);
8159 gen_st32(tmp
, addr
, IS_USER(s
));
8161 tcg_gen_addi_i32(addr
, addr
, 4);
8164 store_reg(s
, rn
, loaded_var
);
8166 if (insn
& (1 << 21)) {
8167 /* Base register writeback. */
8168 if (insn
& (1 << 24)) {
8169 tcg_gen_addi_i32(addr
, addr
, -offset
);
8171 /* Fault if writeback register is in register list. */
8172 if (insn
& (1 << rn
))
8174 store_reg(s
, rn
, addr
);
8176 tcg_temp_free_i32(addr
);
8183 op
= (insn
>> 21) & 0xf;
8185 /* Halfword pack. */
8186 tmp
= load_reg(s
, rn
);
8187 tmp2
= load_reg(s
, rm
);
8188 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
8189 if (insn
& (1 << 5)) {
8193 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8194 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8195 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8199 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8200 tcg_gen_ext16u_i32(tmp
, tmp
);
8201 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8203 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8204 tcg_temp_free_i32(tmp2
);
8205 store_reg(s
, rd
, tmp
);
8207 /* Data processing register constant shift. */
8209 tmp
= tcg_temp_new_i32();
8210 tcg_gen_movi_i32(tmp
, 0);
8212 tmp
= load_reg(s
, rn
);
8214 tmp2
= load_reg(s
, rm
);
8216 shiftop
= (insn
>> 4) & 3;
8217 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8218 conds
= (insn
& (1 << 20)) != 0;
8219 logic_cc
= (conds
&& thumb2_logic_op(op
));
8220 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8221 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
8223 tcg_temp_free_i32(tmp2
);
8225 store_reg(s
, rd
, tmp
);
8227 tcg_temp_free_i32(tmp
);
8231 case 13: /* Misc data processing. */
8232 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
8233 if (op
< 4 && (insn
& 0xf000) != 0xf000)
8236 case 0: /* Register controlled shift. */
8237 tmp
= load_reg(s
, rn
);
8238 tmp2
= load_reg(s
, rm
);
8239 if ((insn
& 0x70) != 0)
8241 op
= (insn
>> 21) & 3;
8242 logic_cc
= (insn
& (1 << 20)) != 0;
8243 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
8246 store_reg_bx(env
, s
, rd
, tmp
);
8248 case 1: /* Sign/zero extend. */
8249 tmp
= load_reg(s
, rm
);
8250 shift
= (insn
>> 4) & 3;
8251 /* ??? In many cases it's not necessary to do a
8252 rotate, a shift is sufficient. */
8254 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8255 op
= (insn
>> 20) & 7;
8257 case 0: gen_sxth(tmp
); break;
8258 case 1: gen_uxth(tmp
); break;
8259 case 2: gen_sxtb16(tmp
); break;
8260 case 3: gen_uxtb16(tmp
); break;
8261 case 4: gen_sxtb(tmp
); break;
8262 case 5: gen_uxtb(tmp
); break;
8263 default: goto illegal_op
;
8266 tmp2
= load_reg(s
, rn
);
8267 if ((op
>> 1) == 1) {
8268 gen_add16(tmp
, tmp2
);
8270 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8271 tcg_temp_free_i32(tmp2
);
8274 store_reg(s
, rd
, tmp
);
8276 case 2: /* SIMD add/subtract. */
8277 op
= (insn
>> 20) & 7;
8278 shift
= (insn
>> 4) & 7;
8279 if ((op
& 3) == 3 || (shift
& 3) == 3)
8281 tmp
= load_reg(s
, rn
);
8282 tmp2
= load_reg(s
, rm
);
8283 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
8284 tcg_temp_free_i32(tmp2
);
8285 store_reg(s
, rd
, tmp
);
8287 case 3: /* Other data processing. */
8288 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
8290 /* Saturating add/subtract. */
8291 tmp
= load_reg(s
, rn
);
8292 tmp2
= load_reg(s
, rm
);
8294 gen_helper_double_saturate(tmp
, tmp
);
8296 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
8298 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
8299 tcg_temp_free_i32(tmp2
);
8301 tmp
= load_reg(s
, rn
);
8303 case 0x0a: /* rbit */
8304 gen_helper_rbit(tmp
, tmp
);
8306 case 0x08: /* rev */
8307 tcg_gen_bswap32_i32(tmp
, tmp
);
8309 case 0x09: /* rev16 */
8312 case 0x0b: /* revsh */
8315 case 0x10: /* sel */
8316 tmp2
= load_reg(s
, rm
);
8317 tmp3
= tcg_temp_new_i32();
8318 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8319 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8320 tcg_temp_free_i32(tmp3
);
8321 tcg_temp_free_i32(tmp2
);
8323 case 0x18: /* clz */
8324 gen_helper_clz(tmp
, tmp
);
8330 store_reg(s
, rd
, tmp
);
8332 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8333 op
= (insn
>> 4) & 0xf;
8334 tmp
= load_reg(s
, rn
);
8335 tmp2
= load_reg(s
, rm
);
8336 switch ((insn
>> 20) & 7) {
8337 case 0: /* 32 x 32 -> 32 */
8338 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8339 tcg_temp_free_i32(tmp2
);
8341 tmp2
= load_reg(s
, rs
);
8343 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8345 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8346 tcg_temp_free_i32(tmp2
);
8349 case 1: /* 16 x 16 -> 32 */
8350 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8351 tcg_temp_free_i32(tmp2
);
8353 tmp2
= load_reg(s
, rs
);
8354 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8355 tcg_temp_free_i32(tmp2
);
8358 case 2: /* Dual multiply add. */
8359 case 4: /* Dual multiply subtract. */
8361 gen_swap_half(tmp2
);
8362 gen_smul_dual(tmp
, tmp2
);
8363 if (insn
& (1 << 22)) {
8364 /* This subtraction cannot overflow. */
8365 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8367 /* This addition cannot overflow 32 bits;
8368 * however it may overflow considered as a signed
8369 * operation, in which case we must set the Q flag.
8371 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8373 tcg_temp_free_i32(tmp2
);
8376 tmp2
= load_reg(s
, rs
);
8377 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8378 tcg_temp_free_i32(tmp2
);
8381 case 3: /* 32 * 16 -> 32msb */
8383 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8386 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8387 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8388 tmp
= tcg_temp_new_i32();
8389 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8390 tcg_temp_free_i64(tmp64
);
8393 tmp2
= load_reg(s
, rs
);
8394 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8395 tcg_temp_free_i32(tmp2
);
8398 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8399 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8401 tmp
= load_reg(s
, rs
);
8402 if (insn
& (1 << 20)) {
8403 tmp64
= gen_addq_msw(tmp64
, tmp
);
8405 tmp64
= gen_subq_msw(tmp64
, tmp
);
8408 if (insn
& (1 << 4)) {
8409 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8411 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8412 tmp
= tcg_temp_new_i32();
8413 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8414 tcg_temp_free_i64(tmp64
);
8416 case 7: /* Unsigned sum of absolute differences. */
8417 gen_helper_usad8(tmp
, tmp
, tmp2
);
8418 tcg_temp_free_i32(tmp2
);
8420 tmp2
= load_reg(s
, rs
);
8421 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8422 tcg_temp_free_i32(tmp2
);
8426 store_reg(s
, rd
, tmp
);
8428 case 6: case 7: /* 64-bit multiply, Divide. */
8429 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
8430 tmp
= load_reg(s
, rn
);
8431 tmp2
= load_reg(s
, rm
);
8432 if ((op
& 0x50) == 0x10) {
8434 if (!arm_feature(env
, ARM_FEATURE_THUMB_DIV
)) {
8438 gen_helper_udiv(tmp
, tmp
, tmp2
);
8440 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8441 tcg_temp_free_i32(tmp2
);
8442 store_reg(s
, rd
, tmp
);
8443 } else if ((op
& 0xe) == 0xc) {
8444 /* Dual multiply accumulate long. */
8446 gen_swap_half(tmp2
);
8447 gen_smul_dual(tmp
, tmp2
);
8449 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8451 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8453 tcg_temp_free_i32(tmp2
);
8455 tmp64
= tcg_temp_new_i64();
8456 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8457 tcg_temp_free_i32(tmp
);
8458 gen_addq(s
, tmp64
, rs
, rd
);
8459 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8460 tcg_temp_free_i64(tmp64
);
8463 /* Unsigned 64-bit multiply */
8464 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8468 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8469 tcg_temp_free_i32(tmp2
);
8470 tmp64
= tcg_temp_new_i64();
8471 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8472 tcg_temp_free_i32(tmp
);
8474 /* Signed 64-bit multiply */
8475 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8480 gen_addq_lo(s
, tmp64
, rs
);
8481 gen_addq_lo(s
, tmp64
, rd
);
8482 } else if (op
& 0x40) {
8483 /* 64-bit accumulate. */
8484 gen_addq(s
, tmp64
, rs
, rd
);
8486 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8487 tcg_temp_free_i64(tmp64
);
8492 case 6: case 7: case 14: case 15:
8494 if (((insn
>> 24) & 3) == 3) {
8495 /* Translate into the equivalent ARM encoding. */
8496 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
8497 if (disas_neon_data_insn(env
, s
, insn
))
8500 if (insn
& (1 << 28))
8502 if (disas_coproc_insn (env
, s
, insn
))
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
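                /*
                 * Worked example (illustrative, not part of the original
                 * source): for hw1 = 0xF001, hw2 = 0xF800 (BL forward by
                 * 0x1000) we have S = 0, imm10 = 1, J1 = J2 = 1, so
                 * I1 = I2 = 0.  The code above computes offset = 0x1000
                 * (S:imm10 placed at [22:12], imm11 at [11:1], the two
                 * XORs leaving bits 23/22 clear), then branches to
                 * s->pc + 0x1000 with LR = s->pc | 1.
                 */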
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
8545 op
= (insn
>> 20) & 7;
8547 case 0: /* msr cpsr. */
8549 tmp
= load_reg(s
, rn
);
8550 addr
= tcg_const_i32(insn
& 0xff);
8551 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8552 tcg_temp_free_i32(addr
);
8553 tcg_temp_free_i32(tmp
);
8558 case 1: /* msr spsr. */
8561 tmp
= load_reg(s
, rn
);
8563 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8567 case 2: /* cps, nop-hint. */
8568 if (((insn
>> 8) & 7) == 0) {
8569 gen_nop_hint(s
, insn
& 0xff);
8571 /* Implemented as NOP in user mode. */
8576 if (insn
& (1 << 10)) {
8577 if (insn
& (1 << 7))
8579 if (insn
& (1 << 6))
8581 if (insn
& (1 << 5))
8583 if (insn
& (1 << 9))
8584 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8586 if (insn
& (1 << 8)) {
8588 imm
|= (insn
& 0x1f);
8591 gen_set_psr_im(s
, offset
, 0, imm
);
8594 case 3: /* Special control operations. */
8596 op
= (insn
>> 4) & 0xf;
8604 /* These execute as NOPs. */
8611 /* Trivial implementation equivalent to bx. */
8612 tmp
= load_reg(s
, rn
);
8615 case 5: /* Exception return. */
8619 if (rn
!= 14 || rd
!= 15) {
8622 tmp
= load_reg(s
, rn
);
8623 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
8624 gen_exception_return(s
, tmp
);
8626 case 6: /* mrs cpsr. */
8627 tmp
= tcg_temp_new_i32();
8629 addr
= tcg_const_i32(insn
& 0xff);
8630 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
8631 tcg_temp_free_i32(addr
);
8633 gen_helper_cpsr_read(tmp
);
8635 store_reg(s
, rd
, tmp
);
8637 case 7: /* mrs spsr. */
8638 /* Not accessible in user mode. */
8639 if (IS_USER(s
) || IS_M(env
))
8641 tmp
= load_cpu_field(spsr
);
8642 store_reg(s
, rd
, tmp
);
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;
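                /*
                 * Worked example (illustrative, not part of the original
                 * source): the pieces above assemble the T3 conditional
                 * branch offset S:J2:J1:imm6:imm11:'0' with S (insn[26])
                 * sign-extended into offset[31:20].  For imm11 = 0x400 and
                 * imm6 = J1 = J2 = S = 0 this yields offset = 0x800, i.e.
                 * a branch to s->pc + 0x800.
                 */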
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
8669 /* Data processing immediate. */
8670 if (insn
& (1 << 25)) {
8671 if (insn
& (1 << 24)) {
8672 if (insn
& (1 << 20))
8674 /* Bitfield/Saturate. */
8675 op
= (insn
>> 21) & 7;
8677 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8679 tmp
= tcg_temp_new_i32();
8680 tcg_gen_movi_i32(tmp
, 0);
8682 tmp
= load_reg(s
, rn
);
8685 case 2: /* Signed bitfield extract. */
8687 if (shift
+ imm
> 32)
8690 gen_sbfx(tmp
, shift
, imm
);
8692 case 6: /* Unsigned bitfield extract. */
8694 if (shift
+ imm
> 32)
8697 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8699 case 3: /* Bitfield insert/clear. */
8702 imm
= imm
+ 1 - shift
;
8704 tmp2
= load_reg(s
, rd
);
8705 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
8706 tcg_temp_free_i32(tmp2
);
8711 default: /* Saturate. */
8714 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8716 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8718 tmp2
= tcg_const_i32(imm
);
8721 if ((op
& 1) && shift
== 0)
8722 gen_helper_usat16(tmp
, tmp
, tmp2
);
8724 gen_helper_usat(tmp
, tmp
, tmp2
);
8727 if ((op
& 1) && shift
== 0)
8728 gen_helper_ssat16(tmp
, tmp
, tmp2
);
8730 gen_helper_ssat(tmp
, tmp
, tmp2
);
8732 tcg_temp_free_i32(tmp2
);
8735 store_reg(s
, rd
, tmp
);
8737 imm
= ((insn
& 0x04000000) >> 15)
8738 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8739 if (insn
& (1 << 22)) {
8740 /* 16-bit immediate. */
8741 imm
|= (insn
>> 4) & 0xf000;
8742 if (insn
& (1 << 23)) {
8744 tmp
= load_reg(s
, rd
);
8745 tcg_gen_ext16u_i32(tmp
, tmp
);
8746 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8749 tmp
= tcg_temp_new_i32();
8750 tcg_gen_movi_i32(tmp
, imm
);
8753 /* Add/sub 12-bit immediate. */
8755 offset
= s
->pc
& ~(uint32_t)3;
8756 if (insn
& (1 << 23))
8760 tmp
= tcg_temp_new_i32();
8761 tcg_gen_movi_i32(tmp
, offset
);
8763 tmp
= load_reg(s
, rn
);
8764 if (insn
& (1 << 23))
8765 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8767 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8770 store_reg(s
, rd
, tmp
);
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
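                /*
                 * Worked example (illustrative, not part of the original
                 * source): the 4-bit "shift" field is i:imm3.  With
                 * imm8 = 0xAB, shift = 1 gives 0x00AB00AB and shift = 3
                 * gives 0xABABABAB.  For shift >= 4 the constant is a
                 * rotated byte with bit 7 forced to 1: shift = 4,
                 * imm8 = 0x60 becomes ROR(0xE0, 8) = 0xE0000000, and
                 * shifter_out = 1 so flag-setting logical ops can take
                 * the carry from bit 31 of the result.
                 */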
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
8822 case 12: /* Load/store single data item. */
8827 if ((insn
& 0x01100000) == 0x01000000) {
8828 if (disas_neon_ls_insn(env
, s
, insn
))
8832 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
8834 if (!(insn
& (1 << 20))) {
8838 /* Byte or halfword load space with dest == r15 : memory hints.
8839 * Catch them early so we don't emit pointless addressing code.
8840 * This space is a mix of:
8841 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8842 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8844 * unallocated hints, which must be treated as NOPs
8845 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8846 * which is easiest for the decoding logic
8847 * Some space which must UNDEF
8849 int op1
= (insn
>> 23) & 3;
8850 int op2
= (insn
>> 6) & 0x3f;
8855 /* UNPREDICTABLE, unallocated hint or
8856 * PLD/PLDW/PLI (literal)
8861 return 0; /* PLD/PLDW/PLI or unallocated hint */
8863 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
8864 return 0; /* PLD/PLDW/PLI or unallocated hint */
8866 /* UNDEF space, or an UNPREDICTABLE */
8872 addr
= tcg_temp_new_i32();
8874 /* s->pc has already been incremented by 4. */
8875 imm
= s
->pc
& 0xfffffffc;
8876 if (insn
& (1 << 23))
8877 imm
+= insn
& 0xfff;
8879 imm
-= insn
& 0xfff;
8880 tcg_gen_movi_i32(addr
, imm
);
8882 addr
= load_reg(s
, rn
);
8883 if (insn
& (1 << 23)) {
8884 /* Positive offset. */
8886 tcg_gen_addi_i32(addr
, addr
, imm
);
8889 switch ((insn
>> 8) & 0xf) {
8890 case 0x0: /* Shifted Register. */
8891 shift
= (insn
>> 4) & 0xf;
8893 tcg_temp_free_i32(addr
);
8896 tmp
= load_reg(s
, rm
);
8898 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8899 tcg_gen_add_i32(addr
, addr
, tmp
);
8900 tcg_temp_free_i32(tmp
);
8902 case 0xc: /* Negative offset. */
8903 tcg_gen_addi_i32(addr
, addr
, -imm
);
8905 case 0xe: /* User privilege. */
8906 tcg_gen_addi_i32(addr
, addr
, imm
);
8909 case 0x9: /* Post-decrement. */
8912 case 0xb: /* Post-increment. */
8916 case 0xd: /* Pre-decrement. */
8919 case 0xf: /* Pre-increment. */
8920 tcg_gen_addi_i32(addr
, addr
, imm
);
8924 tcg_temp_free_i32(addr
);
8929 if (insn
& (1 << 20)) {
8932 case 0: tmp
= gen_ld8u(addr
, user
); break;
8933 case 4: tmp
= gen_ld8s(addr
, user
); break;
8934 case 1: tmp
= gen_ld16u(addr
, user
); break;
8935 case 5: tmp
= gen_ld16s(addr
, user
); break;
8936 case 2: tmp
= gen_ld32(addr
, user
); break;
8938 tcg_temp_free_i32(addr
);
8944 store_reg(s
, rs
, tmp
);
8948 tmp
= load_reg(s
, rs
);
8950 case 0: gen_st8(tmp
, addr
, user
); break;
8951 case 1: gen_st16(tmp
, addr
, user
); break;
8952 case 2: gen_st32(tmp
, addr
, user
); break;
8954 tcg_temp_free_i32(addr
);
8959 tcg_gen_addi_i32(addr
, addr
, imm
);
8961 store_reg(s
, rn
, addr
);
8963 tcg_temp_free_i32(addr
);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(s->pc, s->bswap_code);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
9000 op
= (insn
>> 11) & 3;
9003 rn
= (insn
>> 3) & 7;
9004 tmp
= load_reg(s
, rn
);
9005 if (insn
& (1 << 10)) {
9007 tmp2
= tcg_temp_new_i32();
9008 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
9011 rm
= (insn
>> 6) & 7;
9012 tmp2
= load_reg(s
, rm
);
9014 if (insn
& (1 << 9)) {
9015 if (s
->condexec_mask
)
9016 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9018 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9020 if (s
->condexec_mask
)
9021 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9023 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9025 tcg_temp_free_i32(tmp2
);
9026 store_reg(s
, rd
, tmp
);
9028 /* shift immediate */
9029 rm
= (insn
>> 3) & 7;
9030 shift
= (insn
>> 6) & 0x1f;
9031 tmp
= load_reg(s
, rm
);
9032 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
9033 if (!s
->condexec_mask
)
9035 store_reg(s
, rd
, tmp
);
9039 /* arithmetic large immediate */
9040 op
= (insn
>> 11) & 3;
9041 rd
= (insn
>> 8) & 0x7;
9042 if (op
== 0) { /* mov */
9043 tmp
= tcg_temp_new_i32();
9044 tcg_gen_movi_i32(tmp
, insn
& 0xff);
9045 if (!s
->condexec_mask
)
9047 store_reg(s
, rd
, tmp
);
9049 tmp
= load_reg(s
, rd
);
9050 tmp2
= tcg_temp_new_i32();
9051 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
9054 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9055 tcg_temp_free_i32(tmp
);
9056 tcg_temp_free_i32(tmp2
);
9059 if (s
->condexec_mask
)
9060 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9062 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9063 tcg_temp_free_i32(tmp2
);
9064 store_reg(s
, rd
, tmp
);
9067 if (s
->condexec_mask
)
9068 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9070 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9071 tcg_temp_free_i32(tmp2
);
9072 store_reg(s
, rd
, tmp
);
9078 if (insn
& (1 << 11)) {
9079 rd
= (insn
>> 8) & 7;
9080 /* load pc-relative. Bit 1 of PC is ignored. */
9081 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
9082 val
&= ~(uint32_t)2;
9083 addr
= tcg_temp_new_i32();
9084 tcg_gen_movi_i32(addr
, val
);
9085 tmp
= gen_ld32(addr
, IS_USER(s
));
9086 tcg_temp_free_i32(addr
);
9087 store_reg(s
, rd
, tmp
);
9090 if (insn
& (1 << 10)) {
9091 /* data processing extended or blx */
9092 rd
= (insn
& 7) | ((insn
>> 4) & 8);
9093 rm
= (insn
>> 3) & 0xf;
9094 op
= (insn
>> 8) & 3;
9097 tmp
= load_reg(s
, rd
);
9098 tmp2
= load_reg(s
, rm
);
9099 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9100 tcg_temp_free_i32(tmp2
);
9101 store_reg(s
, rd
, tmp
);
9104 tmp
= load_reg(s
, rd
);
9105 tmp2
= load_reg(s
, rm
);
9106 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9107 tcg_temp_free_i32(tmp2
);
9108 tcg_temp_free_i32(tmp
);
9110 case 2: /* mov/cpy */
9111 tmp
= load_reg(s
, rm
);
9112 store_reg(s
, rd
, tmp
);
9114 case 3:/* branch [and link] exchange thumb register */
9115 tmp
= load_reg(s
, rm
);
9116 if (insn
& (1 << 7)) {
9118 val
= (uint32_t)s
->pc
| 1;
9119 tmp2
= tcg_temp_new_i32();
9120 tcg_gen_movi_i32(tmp2
, val
);
9121 store_reg(s
, 14, tmp2
);
9123 /* already thumb, no need to check */
9130 /* data processing register */
9132 rm
= (insn
>> 3) & 7;
9133 op
= (insn
>> 6) & 0xf;
9134 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
9135 /* the shift/rotate ops want the operands backwards */
9144 if (op
== 9) { /* neg */
9145 tmp
= tcg_temp_new_i32();
9146 tcg_gen_movi_i32(tmp
, 0);
9147 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
9148 tmp
= load_reg(s
, rd
);
9153 tmp2
= load_reg(s
, rm
);
9156 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9157 if (!s
->condexec_mask
)
9161 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
9162 if (!s
->condexec_mask
)
9166 if (s
->condexec_mask
) {
9167 gen_helper_shl(tmp2
, tmp2
, tmp
);
9169 gen_helper_shl_cc(tmp2
, tmp2
, tmp
);
9174 if (s
->condexec_mask
) {
9175 gen_helper_shr(tmp2
, tmp2
, tmp
);
9177 gen_helper_shr_cc(tmp2
, tmp2
, tmp
);
9182 if (s
->condexec_mask
) {
9183 gen_helper_sar(tmp2
, tmp2
, tmp
);
9185 gen_helper_sar_cc(tmp2
, tmp2
, tmp
);
9190 if (s
->condexec_mask
)
9193 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
9196 if (s
->condexec_mask
)
9197 gen_sub_carry(tmp
, tmp
, tmp2
);
9199 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
9202 if (s
->condexec_mask
) {
9203 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
9204 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
9206 gen_helper_ror_cc(tmp2
, tmp2
, tmp
);
9211 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9216 if (s
->condexec_mask
)
9217 tcg_gen_neg_i32(tmp
, tmp2
);
9219 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9222 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9226 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9230 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9231 if (!s
->condexec_mask
)
9235 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9236 if (!s
->condexec_mask
)
9240 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
9241 if (!s
->condexec_mask
)
9245 tcg_gen_not_i32(tmp2
, tmp2
);
9246 if (!s
->condexec_mask
)
9254 store_reg(s
, rm
, tmp2
);
9256 tcg_temp_free_i32(tmp
);
9258 store_reg(s
, rd
, tmp
);
9259 tcg_temp_free_i32(tmp2
);
9262 tcg_temp_free_i32(tmp
);
9263 tcg_temp_free_i32(tmp2
);
9268 /* load/store register offset. */
9270 rn
= (insn
>> 3) & 7;
9271 rm
= (insn
>> 6) & 7;
9272 op
= (insn
>> 9) & 7;
9273 addr
= load_reg(s
, rn
);
9274 tmp
= load_reg(s
, rm
);
9275 tcg_gen_add_i32(addr
, addr
, tmp
);
9276 tcg_temp_free_i32(tmp
);
9278 if (op
< 3) /* store */
9279 tmp
= load_reg(s
, rd
);
9283 gen_st32(tmp
, addr
, IS_USER(s
));
9286 gen_st16(tmp
, addr
, IS_USER(s
));
9289 gen_st8(tmp
, addr
, IS_USER(s
));
9292 tmp
= gen_ld8s(addr
, IS_USER(s
));
9295 tmp
= gen_ld32(addr
, IS_USER(s
));
9298 tmp
= gen_ld16u(addr
, IS_USER(s
));
9301 tmp
= gen_ld8u(addr
, IS_USER(s
));
9304 tmp
= gen_ld16s(addr
, IS_USER(s
));
9307 if (op
>= 3) /* load */
9308 store_reg(s
, rd
, tmp
);
9309 tcg_temp_free_i32(addr
);
9313 /* load/store word immediate offset */
9315 rn
= (insn
>> 3) & 7;
9316 addr
= load_reg(s
, rn
);
9317 val
= (insn
>> 4) & 0x7c;
9318 tcg_gen_addi_i32(addr
, addr
, val
);
9320 if (insn
& (1 << 11)) {
9322 tmp
= gen_ld32(addr
, IS_USER(s
));
9323 store_reg(s
, rd
, tmp
);
9326 tmp
= load_reg(s
, rd
);
9327 gen_st32(tmp
, addr
, IS_USER(s
));
9329 tcg_temp_free_i32(addr
);
9333 /* load/store byte immediate offset */
9335 rn
= (insn
>> 3) & 7;
9336 addr
= load_reg(s
, rn
);
9337 val
= (insn
>> 6) & 0x1f;
9338 tcg_gen_addi_i32(addr
, addr
, val
);
9340 if (insn
& (1 << 11)) {
9342 tmp
= gen_ld8u(addr
, IS_USER(s
));
9343 store_reg(s
, rd
, tmp
);
9346 tmp
= load_reg(s
, rd
);
9347 gen_st8(tmp
, addr
, IS_USER(s
));
9349 tcg_temp_free_i32(addr
);
9353 /* load/store halfword immediate offset */
9355 rn
= (insn
>> 3) & 7;
9356 addr
= load_reg(s
, rn
);
9357 val
= (insn
>> 5) & 0x3e;
9358 tcg_gen_addi_i32(addr
, addr
, val
);
9360 if (insn
& (1 << 11)) {
9362 tmp
= gen_ld16u(addr
, IS_USER(s
));
9363 store_reg(s
, rd
, tmp
);
9366 tmp
= load_reg(s
, rd
);
9367 gen_st16(tmp
, addr
, IS_USER(s
));
9369 tcg_temp_free_i32(addr
);
9373 /* load/store from stack */
9374 rd
= (insn
>> 8) & 7;
9375 addr
= load_reg(s
, 13);
9376 val
= (insn
& 0xff) * 4;
9377 tcg_gen_addi_i32(addr
, addr
, val
);
9379 if (insn
& (1 << 11)) {
9381 tmp
= gen_ld32(addr
, IS_USER(s
));
9382 store_reg(s
, rd
, tmp
);
9385 tmp
= load_reg(s
, rd
);
9386 gen_st32(tmp
, addr
, IS_USER(s
));
9388 tcg_temp_free_i32(addr
);
9392 /* add to high reg */
9393 rd
= (insn
>> 8) & 7;
9394 if (insn
& (1 << 11)) {
9396 tmp
= load_reg(s
, 13);
9398 /* PC. bit 1 is ignored. */
9399 tmp
= tcg_temp_new_i32();
9400 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
9402 val
= (insn
& 0xff) * 4;
9403 tcg_gen_addi_i32(tmp
, tmp
, val
);
9404 store_reg(s
, rd
, tmp
);
9409 op
= (insn
>> 8) & 0xf;
9412 /* adjust stack pointer */
9413 tmp
= load_reg(s
, 13);
9414 val
= (insn
& 0x7f) * 4;
9415 if (insn
& (1 << 7))
9416 val
= -(int32_t)val
;
9417 tcg_gen_addi_i32(tmp
, tmp
, val
);
9418 store_reg(s
, 13, tmp
);
9421 case 2: /* sign/zero extend. */
9424 rm
= (insn
>> 3) & 7;
9425 tmp
= load_reg(s
, rm
);
9426 switch ((insn
>> 6) & 3) {
9427 case 0: gen_sxth(tmp
); break;
9428 case 1: gen_sxtb(tmp
); break;
9429 case 2: gen_uxth(tmp
); break;
9430 case 3: gen_uxtb(tmp
); break;
9432 store_reg(s
, rd
, tmp
);
9434 case 4: case 5: case 0xc: case 0xd:
9436 addr
= load_reg(s
, 13);
9437 if (insn
& (1 << 8))
9441 for (i
= 0; i
< 8; i
++) {
9442 if (insn
& (1 << i
))
9445 if ((insn
& (1 << 11)) == 0) {
9446 tcg_gen_addi_i32(addr
, addr
, -offset
);
9448 for (i
= 0; i
< 8; i
++) {
9449 if (insn
& (1 << i
)) {
9450 if (insn
& (1 << 11)) {
9452 tmp
= gen_ld32(addr
, IS_USER(s
));
9453 store_reg(s
, i
, tmp
);
9456 tmp
= load_reg(s
, i
);
9457 gen_st32(tmp
, addr
, IS_USER(s
));
9459 /* advance to the next address. */
9460 tcg_gen_addi_i32(addr
, addr
, 4);
9464 if (insn
& (1 << 8)) {
9465 if (insn
& (1 << 11)) {
9467 tmp
= gen_ld32(addr
, IS_USER(s
));
9468 /* don't set the pc until the rest of the instruction
9472 tmp
= load_reg(s
, 14);
9473 gen_st32(tmp
, addr
, IS_USER(s
));
9475 tcg_gen_addi_i32(addr
, addr
, 4);
9477 if ((insn
& (1 << 11)) == 0) {
9478 tcg_gen_addi_i32(addr
, addr
, -offset
);
9480 /* write back the new stack pointer */
9481 store_reg(s
, 13, addr
);
9482 /* set the new PC value */
9483 if ((insn
& 0x0900) == 0x0900) {
9484 store_reg_from_load(env
, s
, 15, tmp
);
9488 case 1: case 3: case 9: case 11: /* czb */
9490 tmp
= load_reg(s
, rm
);
9491 s
->condlabel
= gen_new_label();
9493 if (insn
& (1 << 11))
9494 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
9496 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
9497 tcg_temp_free_i32(tmp
);
9498 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
9499 val
= (uint32_t)s
->pc
+ 2;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
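            /*
             * Illustrative note (not part of the original source): the IT
             * encoding is 0xBF00 | firstcond:mask.  Only bits [3:1] of
             * firstcond are kept in condexec_cond; its low bit travels
             * with the 5-bit condexec_mask (firstcond[0]:mask) so the
             * per-instruction advance in gen_intermediate_code_internal()
             * can flip the condition for "else" slots.  E.g. "ITE EQ" is
             * 0xBF0C: condexec_cond = 0x0, condexec_mask = 0x0c.
             */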
9515 case 0xe: /* bkpt */
9517 gen_exception_insn(s
, 2, EXCP_BKPT
);
9522 rn
= (insn
>> 3) & 0x7;
9524 tmp
= load_reg(s
, rn
);
9525 switch ((insn
>> 6) & 3) {
9526 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
9527 case 1: gen_rev16(tmp
); break;
9528 case 3: gen_revsh(tmp
); break;
9529 default: goto illegal_op
;
9531 store_reg(s
, rd
, tmp
);
9535 switch ((insn
>> 5) & 7) {
9539 if (((insn
>> 3) & 1) != s
->bswap_code
) {
9540 /* Dynamic endianness switching not implemented. */
9551 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
9554 addr
= tcg_const_i32(19);
9555 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9556 tcg_temp_free_i32(addr
);
9560 addr
= tcg_const_i32(16);
9561 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9562 tcg_temp_free_i32(addr
);
9564 tcg_temp_free_i32(tmp
);
9567 if (insn
& (1 << 4)) {
9568 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
9572 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
9587 /* load/store multiple */
9589 TCGV_UNUSED(loaded_var
);
9590 rn
= (insn
>> 8) & 0x7;
9591 addr
= load_reg(s
, rn
);
9592 for (i
= 0; i
< 8; i
++) {
9593 if (insn
& (1 << i
)) {
9594 if (insn
& (1 << 11)) {
9596 tmp
= gen_ld32(addr
, IS_USER(s
));
9600 store_reg(s
, i
, tmp
);
9604 tmp
= load_reg(s
, i
);
9605 gen_st32(tmp
, addr
, IS_USER(s
));
9607 /* advance to the next address */
9608 tcg_gen_addi_i32(addr
, addr
, 4);
9611 if ((insn
& (1 << rn
)) == 0) {
9612 /* base reg not in list: base register writeback */
9613 store_reg(s
, rn
, addr
);
9615 /* base reg in list: if load, complete it now */
9616 if (insn
& (1 << 11)) {
9617 store_reg(s
, rn
, loaded_var
);
9619 tcg_temp_free_i32(addr
);
9624 /* conditional branch or swi */
9625 cond
= (insn
>> 8) & 0xf;
9631 gen_set_pc_im(s
->pc
);
9632 s
->is_jmp
= DISAS_SWI
;
9635 /* generate a conditional jump to next instruction */
9636 s
->condlabel
= gen_new_label();
9637 gen_test_cc(cond
^ 1, s
->condlabel
);
9640 /* jump to the offset */
9641 val
= (uint32_t)s
->pc
+ 2;
9642 offset
= ((int32_t)insn
<< 24) >> 24;
9648 if (insn
& (1 << 11)) {
9649 if (disas_thumb2_insn(env
, s
, insn
))
9653 /* unconditional branch */
9654 val
= (uint32_t)s
->pc
;
9655 offset
= ((int32_t)insn
<< 21) >> 21;
9656 val
+= (offset
<< 1) + 2;
9661 if (disas_thumb2_insn(env
, s
, insn
))
9667 gen_exception_insn(s
, 4, EXCP_UDEF
);
9671 gen_exception_insn(s
, 2, EXCP_UDEF
);
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[].  restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */
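    /*
     * Worked example (illustrative, not part of the original source):
     * for "ITE EQ" (insn 0xBF0C) disas_thumb_insn records
     * condexec_cond = 0x0 and condexec_mask = 0x0c.  The per-insn advance
     * below then shifts the mask left each time: the first guarded insn
     * sees cond = 0x0 (EQ), the second sees cond = 0x1 (NE), and when the
     * mask reaches zero the IT block is finished and condexec_cond is
     * cleared.
     */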
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
9770 #ifdef CONFIG_USER_ONLY
9771 /* Intercept jump to the magic kernel page. */
9772 if (dc
->pc
>= 0xffff0000) {
9773 /* We always get here via a jump, so know we are not in a
9774 conditional execution block. */
9775 gen_exception(EXCP_KERNEL_TRAP
);
9776 dc
->is_jmp
= DISAS_UPDATE
;
9780 if (dc
->pc
>= 0xfffffff0 && IS_M(env
)) {
9781 /* We always get here via a jump, so know we are not in a
9782 conditional execution block. */
9783 gen_exception(EXCP_EXCEPTION_EXIT
);
9784 dc
->is_jmp
= DISAS_UPDATE
;
9789 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
9790 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
9791 if (bp
->pc
== dc
->pc
) {
9792 gen_exception_insn(dc
, 0, EXCP_DEBUG
);
9793 /* Advance PC so that clearing the breakpoint will
9794 invalidate this TB. */
9796 goto done_generating
;
9802 j
= gen_opc_ptr
- gen_opc_buf
;
9806 gen_opc_instr_start
[lj
++] = 0;
9808 gen_opc_pc
[lj
] = dc
->pc
;
9809 gen_opc_condexec_bits
[lj
] = (dc
->condexec_cond
<< 4) | (dc
->condexec_mask
>> 1);
9810 gen_opc_instr_start
[lj
] = 1;
9811 gen_opc_icount
[lj
] = num_insns
;
9814 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
9817 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
9818 tcg_gen_debug_insn_start(dc
->pc
);
9822 disas_thumb_insn(env
, dc
);
9823 if (dc
->condexec_mask
) {
9824 dc
->condexec_cond
= (dc
->condexec_cond
& 0xe)
9825 | ((dc
->condexec_mask
>> 4) & 1);
9826 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
9827 if (dc
->condexec_mask
== 0) {
9828 dc
->condexec_cond
= 0;
9832 disas_arm_insn(env
, dc
);
9835 if (dc
->condjmp
&& !dc
->is_jmp
) {
9836 gen_set_label(dc
->condlabel
);
9840 if (tcg_check_temp_count()) {
9841 fprintf(stderr
, "TCG temporary leak before %08x\n", dc
->pc
);
9844 /* Translation stops when a conditional branch is encountered.
9845 * Otherwise the subsequent code could get translated several times.
9846 * Also stop translation when a page boundary is reached. This
9847 * ensures prefetch aborts occur at the right place. */
9849 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
9850 !env
->singlestep_enabled
&&
9852 dc
->pc
< next_page_start
&&
9853 num_insns
< max_insns
);
9855 if (tb
->cflags
& CF_LAST_IO
) {
9857 /* FIXME: This can theoretically happen with self-modifying
9859 cpu_abort(env
, "IO on conditional branch instruction");
9864 /* At this stage dc->condjmp will only be set when the skipped
9865 instruction was a conditional branch or trap, and the PC has
9866 already been written. */
9867 if (unlikely(env
->singlestep_enabled
)) {
9868 /* Make sure the pc is updated, and raise a debug exception. */
9870 gen_set_condexec(dc
);
9871 if (dc
->is_jmp
== DISAS_SWI
) {
9872 gen_exception(EXCP_SWI
);
9874 gen_exception(EXCP_DEBUG
);
9876 gen_set_label(dc
->condlabel
);
9878 if (dc
->condjmp
|| !dc
->is_jmp
) {
9879 gen_set_pc_im(dc
->pc
);
9882 gen_set_condexec(dc
);
9883 if (dc
->is_jmp
== DISAS_SWI
&& !dc
->condjmp
) {
9884 gen_exception(EXCP_SWI
);
9886 /* FIXME: Single stepping a WFI insn will not halt
9888 gen_exception(EXCP_DEBUG
);
9891 /* While branches must always occur at the end of an IT block,
9892 there are a few other things that can cause us to terminate
9893 the TB in the middel of an IT block:
9894 - Exception generating instructions (bkpt, swi, undefined).
9896 - Hardware watchpoints.
9897 Hardware breakpoints have already been handled and skip this code.
9899 gen_set_condexec(dc
);
9900 switch(dc
->is_jmp
) {
9902 gen_goto_tb(dc
, 1, dc
->pc
);
9907 /* indicate that the hash table must be used to find the next TB */
9911 /* nothing more to generate */
9917 gen_exception(EXCP_SWI
);
9921 gen_set_label(dc
->condlabel
);
9922 gen_set_condexec(dc
);
9923 gen_goto_tb(dc
, 1, dc
->pc
);
9929 gen_icount_end(tb
, num_insns
);
9930 *gen_opc_ptr
= INDEX_op_end
;
9933 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
9934 qemu_log("----------------\n");
9935 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
9936 log_target_disas(pc_start
, dc
->pc
- pc_start
,
9937 dc
->thumb
| (dc
->bswap_code
<< 1));
9942 j
= gen_opc_ptr
- gen_opc_buf
;
9945 gen_opc_instr_start
[lj
++] = 0;
9947 tb
->size
= dc
->pc
- pc_start
;
9948 tb
->icount
= num_insns
;
}

void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9967 void cpu_dump_state(CPUARMState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
9977 /* ??? This assumes float64 and double have the same layout.
9978 Oh well, it's only debug dumps. */
9987 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
9989 cpu_fprintf(f
, "\n");
9991 cpu_fprintf(f
, " ");
9993 psr
= cpsr_read(env
);
9994 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
9996 psr
& (1 << 31) ? 'N' : '-',
9997 psr
& (1 << 30) ? 'Z' : '-',
9998 psr
& (1 << 29) ? 'C' : '-',
9999 psr
& (1 << 28) ? 'V' : '-',
10000 psr
& CPSR_T
? 'T' : 'A',
10001 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
10004 for (i
= 0; i
< 16; i
++) {
10005 d
.d
= env
->vfp
.regs
[i
];
10009 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10010 i
* 2, (int)s0
.i
, s0
.s
,
10011 i
* 2 + 1, (int)s1
.i
, s1
.s
,
10012 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
10015 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}