/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
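/* Each use of ARCH() assumes that the expanding function provides an
 * "illegal_op" label (as the main ARM and Thumb decode routines do), so an
 * unimplemented architecture feature falls through to the
 * undefined-instruction path.
 */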
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
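/* Typical use of the two helpers above: a flag such as CF is read into a
 * fresh temporary with "tmp = load_cpu_field(CF);" and written back, which
 * also frees the temporary, with "store_cpu_field(tmp, name);".  Both expand
 * to an access at offsetof(CPUState, field) relative to cpu_env.
 */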
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
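/* Writing r15 ends the TB: the low bit is masked off because the PC must stay
 * halfword aligned, and is_jmp = DISAS_JUMP makes the translator stop after
 * an instruction that changed control flow.
 */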
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
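/* Dual signed 16x16->32 multiply: on return from gen_smul_dual below, a holds
 * the product of the two low halfwords and b the product of the two high
 * halfwords.
 */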
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
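/* The xor/sub pair above sign-extends a width-bit field without a branch:
 * after masking to width bits, flipping the sign bit and then subtracting it
 * maps values >= 2^(width-1) to their negative two's-complement equivalents.
 */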
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
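/* The masking trick performs two independent 16-bit additions with a single
 * 32-bit add: clearing bit 15 of both inputs stops a carry from the low
 * halfword propagating into the high halfword, and xoring back the saved
 * bit-15 values restores the correct top bit of the low result.
 */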
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
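/* An immediate shift amount of zero is special in the ARM encoding: LSR #0
 * and ASR #0 really mean a shift by 32, and ROR #0 encodes RRX (rotate right
 * by one through the carry flag), which is why case 3 shifts in the saved CF
 * bit when shift == 0.
 */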
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
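/* gen_test_cc branches to "label" when condition "cc" holds.  It relies on
 * the flag representation used throughout this file: ZF is zero exactly when
 * the Z flag is set, NF and VF keep their flag in bit 31, and CF holds the
 * carry as 0 or 1, so simple comparisons against zero are sufficient.
 */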
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
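/* Bit 0 of a BX-style target selects the instruction set (1 = Thumb), so the
 * PC is always written with that bit cleared and the bit itself becomes the
 * new "thumb" state; DISAS_UPDATE then forces translation to stop and resume
 * with the new CPU state.
 */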
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function.  */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
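/* The "index" argument of these load/store wrappers is the MMU index for the
 * access; callers in this file normally pass IS_USER(s), so user-mode
 * translation performs unprivileged accesses.
 */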
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
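/* The VFP generators above work on an implicit register pair: cpu_F0s/cpu_F0d
 * is both source and destination and cpu_F1s/cpu_F1d supplies the second
 * operand, which keeps the VFP data-processing decoder compact.
 */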
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
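/* Single-precision register s<2n> occupies the low word of double register
 * d<n> and s<2n+1> the high word, so the single-precision case picks
 * l.lower or l.upper inside the containing CPU_DoubleU.
 */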
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
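/* The iwMMXt generators follow a simple convention: cpu_M0 is the working
 * 64-bit accumulator (loaded from a wRn register, operated on, then written
 * back) and cpu_V1 carries the second operand.
 */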
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)

#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}

IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
& 0x0f000000) != 0x0e000000)
1427 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd
= (insn
>> 12) & 0xf;
1430 rd0
= (insn
>> 0) & 0xf;
1431 rd1
= (insn
>> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1439 case 0x011: /* TMCR */
1442 rd
= (insn
>> 12) & 0xf;
1443 wrd
= (insn
>> 16) & 0xf;
1445 case ARM_IWMMXT_wCID
:
1446 case ARM_IWMMXT_wCASF
:
1448 case ARM_IWMMXT_wCon
:
1449 gen_op_iwmmxt_set_cup();
1451 case ARM_IWMMXT_wCSSF
:
1452 tmp
= iwmmxt_load_creg(wrd
);
1453 tmp2
= load_reg(s
, rd
);
1454 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1455 tcg_temp_free_i32(tmp2
);
1456 iwmmxt_store_creg(wrd
, tmp
);
1458 case ARM_IWMMXT_wCGR0
:
1459 case ARM_IWMMXT_wCGR1
:
1460 case ARM_IWMMXT_wCGR2
:
1461 case ARM_IWMMXT_wCGR3
:
1462 gen_op_iwmmxt_set_cup();
1463 tmp
= load_reg(s
, rd
);
1464 iwmmxt_store_creg(wrd
, tmp
);
1470 case 0x100: /* WXOR */
1471 wrd
= (insn
>> 12) & 0xf;
1472 rd0
= (insn
>> 0) & 0xf;
1473 rd1
= (insn
>> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1481 case 0x111: /* TMRC */
1484 rd
= (insn
>> 12) & 0xf;
1485 wrd
= (insn
>> 16) & 0xf;
1486 tmp
= iwmmxt_load_creg(wrd
);
1487 store_reg(s
, rd
, tmp
);
1489 case 0x300: /* WANDN */
1490 wrd
= (insn
>> 12) & 0xf;
1491 rd0
= (insn
>> 0) & 0xf;
1492 rd1
= (insn
>> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1494 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1501 case 0x200: /* WAND */
1502 wrd
= (insn
>> 12) & 0xf;
1503 rd0
= (insn
>> 0) & 0xf;
1504 rd1
= (insn
>> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd
= (insn
>> 12) & 0xf;
1514 rd0
= (insn
>> 0) & 0xf;
1515 rd1
= (insn
>> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1517 if (insn
& (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1522 gen_op_iwmmxt_set_mup();
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
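/* The XScale internal accumulator is 40 bits wide, which is why the MRA path
 * above masks the high destination register down to its 8 significant bits.
 */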
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
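
/* Extract VFP register numbers from instruction encodings.  A single-
 * precision register number keeps its lowest bit in a separate "small"
 * bit of the encoding; for double-precision registers (VFP3 and later,
 * D0-D31) that bit instead becomes bit 4 of the register number, and on
 * pre-VFP3 cores (D0-D15 only) a set bit means an undefined instruction.
 */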
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
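
/* Duplicate a narrow value across a 32-bit word: gen_neon_dup_u8(var, 0)
 * turns 0x000000ab into 0xabababab, and the 16-bit variants below do the
 * same for half-words.  Used for VDUP and the load-and-replicate paths.
 */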
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
2709 if (insn
& (1 << 4)) {
2710 /* single register transfer */
2711 rd
= (insn
>> 12) & 0xf;
2716 VFP_DREG_N(rn
, insn
);
2719 if (insn
& 0x00c00060
2720 && !arm_feature(env
, ARM_FEATURE_NEON
))
2723 pass
= (insn
>> 21) & 1;
2724 if (insn
& (1 << 22)) {
2726 offset
= ((insn
>> 5) & 3) * 8;
2727 } else if (insn
& (1 << 5)) {
2729 offset
= (insn
& (1 << 6)) ? 16 : 0;
2734 if (insn
& ARM_CP_RW_BIT
) {
2736 tmp
= neon_load_reg(rn
, pass
);
2740 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2741 if (insn
& (1 << 23))
2747 if (insn
& (1 << 23)) {
2749 tcg_gen_shri_i32(tmp
, tmp
, 16);
2755 tcg_gen_sari_i32(tmp
, tmp
, 16);
2764 store_reg(s
, rd
, tmp
);
2767 tmp
= load_reg(s
, rd
);
2768 if (insn
& (1 << 23)) {
2771 gen_neon_dup_u8(tmp
, 0);
2772 } else if (size
== 1) {
2773 gen_neon_dup_low16(tmp
);
2775 for (n
= 0; n
<= pass
* 2; n
++) {
2776 tmp2
= tcg_temp_new_i32();
2777 tcg_gen_mov_i32(tmp2
, tmp
);
2778 neon_store_reg(rn
, n
, tmp2
);
2780 neon_store_reg(rn
, n
, tmp
);
2785 tmp2
= neon_load_reg(rn
, pass
);
2786 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2787 tcg_temp_free_i32(tmp2
);
2790 tmp2
= neon_load_reg(rn
, pass
);
2791 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2792 tcg_temp_free_i32(tmp2
);
2797 neon_store_reg(rn
, pass
, tmp
);
2801 if ((insn
& 0x6f) != 0x00)
2803 rn
= VFP_SREG_N(insn
);
2804 if (insn
& ARM_CP_RW_BIT
) {
2806 if (insn
& (1 << 21)) {
2807 /* system register */
2812 /* VFP2 allows access to FSID from userspace.
2813 VFP3 restricts all id registers to privileged
2816 && arm_feature(env
, ARM_FEATURE_VFP3
))
2818 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2823 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2825 case ARM_VFP_FPINST
:
2826 case ARM_VFP_FPINST2
:
2827 /* Not present in VFP3. */
2829 || arm_feature(env
, ARM_FEATURE_VFP3
))
2831 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2835 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2836 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2838 tmp
= tcg_temp_new_i32();
2839 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2845 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2847 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2853 gen_mov_F0_vreg(0, rn
);
2854 tmp
= gen_vfp_mrs();
2857 /* Set the 4 flag bits in the CPSR. */
2859 tcg_temp_free_i32(tmp
);
2861 store_reg(s
, rd
, tmp
);
2865 tmp
= load_reg(s
, rd
);
2866 if (insn
& (1 << 21)) {
2868 /* system register */
2873 /* Writes are ignored. */
2876 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2877 tcg_temp_free_i32(tmp
);
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2886 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2889 case ARM_VFP_FPINST
:
2890 case ARM_VFP_FPINST2
:
2891 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2898 gen_mov_vreg_F0(0, rn
);
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2909 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2911 /* rn is register number */
2912 VFP_DREG_N(rn
, insn
);
2915 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2916 /* Integer or single precision destination. */
2917 rd
= VFP_SREG_D(insn
);
2919 VFP_DREG_D(rd
, insn
);
2922 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2926 rm
= VFP_SREG_M(insn
);
2928 VFP_DREG_M(rm
, insn
);
2931 rn
= VFP_SREG_N(insn
);
2932 if (op
== 15 && rn
== 15) {
2933 /* Double precision destination. */
2934 VFP_DREG_D(rd
, insn
);
2936 rd
= VFP_SREG_D(insn
);
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2941 rm
= VFP_SREG_M(insn
);
2944 veclen
= s
->vec_len
;
2945 if (op
== 15 && rn
> 3)
2948 /* Shut up compiler warnings. */
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd
& bank_mask
) == 0) {
2965 delta_d
= (s
->vec_stride
>> 1) + 1;
2967 delta_d
= s
->vec_stride
+ 1;
2969 if ((rm
& bank_mask
) == 0) {
2970 /* mixed scalar/vector */
2979 /* Load the initial operands. */
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm
);
2990 gen_mov_F0_vreg(dp
, rd
);
2991 gen_mov_F1_vreg(dp
, rm
);
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp
, rd
);
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp
, rd
);
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp
, rm
);
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp
, rn
);
3018 gen_mov_F1_vreg(dp
, rm
);
3022 /* Perform the calculation. */
3024 case 0: /* mac: fd + (fn * fm) */
3026 gen_mov_F1_vreg(dp
, rd
);
3029 case 1: /* nmac: fd - (fn * fm) */
3032 gen_mov_F1_vreg(dp
, rd
);
3035 case 2: /* msc: -fd + (fn * fm) */
3037 gen_mov_F1_vreg(dp
, rd
);
3040 case 3: /* nmsc: -fd - (fn * fm) */
3043 gen_mov_F1_vreg(dp
, rd
);
3046 case 4: /* mul: fn * fm */
3049 case 5: /* nmul: -(fn * fm) */
3053 case 6: /* add: fn + fm */
3056 case 7: /* sub: fn - fm */
3059 case 8: /* div: fn / fm */
3062 case 14: /* fconst */
3063 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3066 n
= (insn
<< 12) & 0x80000000;
3067 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3074 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3081 tcg_gen_movi_i32(cpu_F0s
, n
);
3084 case 15: /* extension space */
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3101 tmp
= gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp
, tmp
);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3104 tcg_temp_free_i32(tmp
);
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3109 tmp
= gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp
, tmp
, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3112 tcg_temp_free_i32(tmp
);
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3117 tmp
= tcg_temp_new_i32();
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3119 gen_mov_F0_vreg(0, rd
);
3120 tmp2
= gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3122 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3123 tcg_temp_free_i32(tmp2
);
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3129 tmp
= tcg_temp_new_i32();
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3131 tcg_gen_shli_i32(tmp
, tmp
, 16);
3132 gen_mov_F0_vreg(0, rd
);
3133 tmp2
= gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3135 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3136 tcg_temp_free_i32(tmp2
);
3148 case 11: /* cmpez */
3152 case 15: /* single<->double conversion */
3154 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3156 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3158 case 16: /* fuito */
3161 case 17: /* fsito */
3164 case 20: /* fshto */
3165 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3167 gen_vfp_shto(dp
, 16 - rm
);
3169 case 21: /* fslto */
3170 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3172 gen_vfp_slto(dp
, 32 - rm
);
3174 case 22: /* fuhto */
3175 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3177 gen_vfp_uhto(dp
, 16 - rm
);
3179 case 23: /* fulto */
3180 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3182 gen_vfp_ulto(dp
, 32 - rm
);
3184 case 24: /* ftoui */
3187 case 25: /* ftouiz */
3190 case 26: /* ftosi */
3193 case 27: /* ftosiz */
3196 case 28: /* ftosh */
3197 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3199 gen_vfp_tosh(dp
, 16 - rm
);
3201 case 29: /* ftosl */
3202 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3204 gen_vfp_tosl(dp
, 32 - rm
);
3206 case 30: /* ftouh */
3207 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3209 gen_vfp_touh(dp
, 16 - rm
);
3211 case 31: /* ftoul */
3212 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3214 gen_vfp_toul(dp
, 32 - rm
);
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn
);
3221 default: /* undefined */
3222 printf ("op:%d\n", op
);
3226 /* Write back the result. */
3227 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3228 ; /* Comparison, do nothing. */
3229 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
3231 gen_mov_vreg_F0(0, rd
);
3232 else if (op
== 15 && rn
== 15)
3234 gen_mov_vreg_F0(!dp
, rd
);
3236 gen_mov_vreg_F0(dp
, rd
);
3238 /* break out of the loop if we have finished */
3242 if (op
== 15 && delta_m
== 0) {
3243 /* single source one-many */
3245 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3247 gen_mov_vreg_F0(dp
, rd
);
3251 /* Setup the next operands. */
3253 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3257 /* One source operand. */
3258 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3260 gen_mov_F0_vreg(dp
, rm
);
3262 /* Two source operands. */
3263 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3265 gen_mov_F0_vreg(dp
, rn
);
3267 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3269 gen_mov_F1_vreg(dp
, rm
);
3277 if ((insn
& 0x03e00000) == 0x00400000) {
3278 /* two-register transfer */
3279 rn
= (insn
>> 16) & 0xf;
3280 rd
= (insn
>> 12) & 0xf;
3282 VFP_DREG_M(rm
, insn
);
3284 rm
= VFP_SREG_M(insn
);
3287 if (insn
& ARM_CP_RW_BIT
) {
3290 gen_mov_F0_vreg(0, rm
* 2);
3291 tmp
= gen_vfp_mrs();
3292 store_reg(s
, rd
, tmp
);
3293 gen_mov_F0_vreg(0, rm
* 2 + 1);
3294 tmp
= gen_vfp_mrs();
3295 store_reg(s
, rn
, tmp
);
3297 gen_mov_F0_vreg(0, rm
);
3298 tmp
= gen_vfp_mrs();
3299 store_reg(s
, rd
, tmp
);
3300 gen_mov_F0_vreg(0, rm
+ 1);
3301 tmp
= gen_vfp_mrs();
3302 store_reg(s
, rn
, tmp
);
3307 tmp
= load_reg(s
, rd
);
3309 gen_mov_vreg_F0(0, rm
* 2);
3310 tmp
= load_reg(s
, rn
);
3312 gen_mov_vreg_F0(0, rm
* 2 + 1);
3314 tmp
= load_reg(s
, rd
);
3316 gen_mov_vreg_F0(0, rm
);
3317 tmp
= load_reg(s
, rn
);
3319 gen_mov_vreg_F0(0, rm
+ 1);
3324 rn
= (insn
>> 16) & 0xf;
3326 VFP_DREG_D(rd
, insn
);
3328 rd
= VFP_SREG_D(insn
);
3329 if (s
->thumb
&& rn
== 15) {
3330 addr
= tcg_temp_new_i32();
3331 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3333 addr
= load_reg(s
, rn
);
3335 if ((insn
& 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset
= (insn
& 0xff) << 2;
3338 if ((insn
& (1 << 23)) == 0)
3340 tcg_gen_addi_i32(addr
, addr
, offset
);
3341 if (insn
& (1 << 20)) {
3342 gen_vfp_ld(s
, dp
, addr
);
3343 gen_mov_vreg_F0(dp
, rd
);
3345 gen_mov_F0_vreg(dp
, rd
);
3346 gen_vfp_st(s
, dp
, addr
);
3348 tcg_temp_free_i32(addr
);
3350 /* load/store multiple */
3352 n
= (insn
>> 1) & 0x7f;
3356 if (insn
& (1 << 24)) /* pre-decrement */
3357 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3363 for (i
= 0; i
< n
; i
++) {
3364 if (insn
& ARM_CP_RW_BIT
) {
3366 gen_vfp_ld(s
, dp
, addr
);
3367 gen_mov_vreg_F0(dp
, rd
+ i
);
3370 gen_mov_F0_vreg(dp
, rd
+ i
);
3371 gen_vfp_st(s
, dp
, addr
);
3373 tcg_gen_addi_i32(addr
, addr
, offset
);
3375 if (insn
& (1 << 21)) {
3377 if (insn
& (1 << 24))
3378 offset
= -offset
* n
;
3379 else if (dp
&& (insn
& 1))
3385 tcg_gen_addi_i32(addr
, addr
, offset
);
3386 store_reg(s
, rn
, addr
);
3388 tcg_temp_free_i32(addr
);
3394 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
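
/* Pick the top or bottom 16 bits of each operand (nonzero x/y selects the
 * top half) and generate the signed 16x16->32 multiply used by the ARM
 * DSP multiply instructions (SMULxy and friends).
 */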
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality*/
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
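
/* Shorthand for the common "cpu_V0 = cpu_V0 <op> cpu_V1" argument pattern
 * taken by the 64-bit Neon helpers below.
 */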
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qunzip8(tmp, tmp2); break;
        case 1: gen_helper_neon_qunzip16(tmp, tmp2); break;
        case 2: gen_helper_neon_qunzip32(tmp, tmp2); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_unzip8(tmp, tmp2); break;
        case 1: gen_helper_neon_unzip16(tmp, tmp2); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qzip8(tmp, tmp2); break;
        case 1: gen_helper_neon_qzip16(tmp, tmp2); break;
        case 2: gen_helper_neon_qzip32(tmp, tmp2); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_zip8(tmp, tmp2); break;
        case 1: gen_helper_neon_zip16(tmp, tmp2); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
/* For each NEON "load/store all elements" op: the number of registers
   accessed, the element interleave factor and the register spacing. */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
3821 if (!s
->vfp_enabled
)
3823 VFP_DREG_D(rd
, insn
);
3824 rn
= (insn
>> 16) & 0xf;
3826 load
= (insn
& (1 << 21)) != 0;
3827 if ((insn
& (1 << 23)) == 0) {
3828 /* Load store all elements. */
3829 op
= (insn
>> 8) & 0xf;
3830 size
= (insn
>> 6) & 3;
3833 nregs
= neon_ls_element_type
[op
].nregs
;
3834 interleave
= neon_ls_element_type
[op
].interleave
;
3835 spacing
= neon_ls_element_type
[op
].spacing
;
3836 if (size
== 3 && (interleave
| spacing
) != 1)
3838 addr
= tcg_temp_new_i32();
3839 load_reg_var(s
, addr
, rn
);
3840 stride
= (1 << size
) * interleave
;
3841 for (reg
= 0; reg
< nregs
; reg
++) {
3842 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3843 load_reg_var(s
, addr
, rn
);
3844 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3845 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3846 load_reg_var(s
, addr
, rn
);
3847 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3851 tmp64
= gen_ld64(addr
, IS_USER(s
));
3852 neon_store_reg64(tmp64
, rd
);
3853 tcg_temp_free_i64(tmp64
);
3855 tmp64
= tcg_temp_new_i64();
3856 neon_load_reg64(tmp64
, rd
);
3857 gen_st64(tmp64
, addr
, IS_USER(s
));
3859 tcg_gen_addi_i32(addr
, addr
, stride
);
3861 for (pass
= 0; pass
< 2; pass
++) {
3864 tmp
= gen_ld32(addr
, IS_USER(s
));
3865 neon_store_reg(rd
, pass
, tmp
);
3867 tmp
= neon_load_reg(rd
, pass
);
3868 gen_st32(tmp
, addr
, IS_USER(s
));
3870 tcg_gen_addi_i32(addr
, addr
, stride
);
3871 } else if (size
== 1) {
3873 tmp
= gen_ld16u(addr
, IS_USER(s
));
3874 tcg_gen_addi_i32(addr
, addr
, stride
);
3875 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3876 tcg_gen_addi_i32(addr
, addr
, stride
);
3877 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3878 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3879 tcg_temp_free_i32(tmp2
);
3880 neon_store_reg(rd
, pass
, tmp
);
3882 tmp
= neon_load_reg(rd
, pass
);
3883 tmp2
= tcg_temp_new_i32();
3884 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3885 gen_st16(tmp
, addr
, IS_USER(s
));
3886 tcg_gen_addi_i32(addr
, addr
, stride
);
3887 gen_st16(tmp2
, addr
, IS_USER(s
));
3888 tcg_gen_addi_i32(addr
, addr
, stride
);
3890 } else /* size == 0 */ {
3893 for (n
= 0; n
< 4; n
++) {
3894 tmp
= gen_ld8u(addr
, IS_USER(s
));
3895 tcg_gen_addi_i32(addr
, addr
, stride
);
3899 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3900 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3901 tcg_temp_free_i32(tmp
);
3904 neon_store_reg(rd
, pass
, tmp2
);
3906 tmp2
= neon_load_reg(rd
, pass
);
3907 for (n
= 0; n
< 4; n
++) {
3908 tmp
= tcg_temp_new_i32();
3910 tcg_gen_mov_i32(tmp
, tmp2
);
3912 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3914 gen_st8(tmp
, addr
, IS_USER(s
));
3915 tcg_gen_addi_i32(addr
, addr
, stride
);
3917 tcg_temp_free_i32(tmp2
);
3924 tcg_temp_free_i32(addr
);
3927 size
= (insn
>> 10) & 3;
3929 /* Load single element to all lanes. */
3930 int a
= (insn
>> 4) & 1;
3934 size
= (insn
>> 6) & 3;
3935 nregs
= ((insn
>> 8) & 3) + 1;
3938 if (nregs
!= 4 || a
== 0) {
3941 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3944 if (nregs
== 1 && a
== 1 && size
== 0) {
3947 if (nregs
== 3 && a
== 1) {
3950 addr
= tcg_temp_new_i32();
3951 load_reg_var(s
, addr
, rn
);
3953 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3954 tmp
= gen_load_and_replicate(s
, addr
, size
);
3955 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3956 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3957 if (insn
& (1 << 5)) {
3958 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
3959 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
3961 tcg_temp_free_i32(tmp
);
3963 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3964 stride
= (insn
& (1 << 5)) ? 2 : 1;
3965 for (reg
= 0; reg
< nregs
; reg
++) {
3966 tmp
= gen_load_and_replicate(s
, addr
, size
);
3967 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3968 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3969 tcg_temp_free_i32(tmp
);
3970 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3974 tcg_temp_free_i32(addr
);
3975 stride
= (1 << size
) * nregs
;
3977 /* Single element. */
3978 pass
= (insn
>> 7) & 1;
3981 shift
= ((insn
>> 5) & 3) * 8;
3985 shift
= ((insn
>> 6) & 1) * 16;
3986 stride
= (insn
& (1 << 5)) ? 2 : 1;
3990 stride
= (insn
& (1 << 6)) ? 2 : 1;
3995 nregs
= ((insn
>> 8) & 3) + 1;
3996 addr
= tcg_temp_new_i32();
3997 load_reg_var(s
, addr
, rn
);
3998 for (reg
= 0; reg
< nregs
; reg
++) {
4002 tmp
= gen_ld8u(addr
, IS_USER(s
));
4005 tmp
= gen_ld16u(addr
, IS_USER(s
));
4008 tmp
= gen_ld32(addr
, IS_USER(s
));
4010 default: /* Avoid compiler warnings. */
4014 tmp2
= neon_load_reg(rd
, pass
);
4015 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
4016 tcg_temp_free_i32(tmp2
);
4018 neon_store_reg(rd
, pass
, tmp
);
4019 } else { /* Store */
4020 tmp
= neon_load_reg(rd
, pass
);
4022 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4025 gen_st8(tmp
, addr
, IS_USER(s
));
4028 gen_st16(tmp
, addr
, IS_USER(s
));
4031 gen_st32(tmp
, addr
, IS_USER(s
));
4036 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4038 tcg_temp_free_i32(addr
);
4039 stride
= nregs
* (1 << size
);
4045 base
= load_reg(s
, rn
);
4047 tcg_gen_addi_i32(base
, base
, stride
);
4050 index
= load_reg(s
, rm
);
4051 tcg_gen_add_i32(base
, base
, index
);
4052 tcg_temp_free_i32(index
);
4054 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
4247 /* Symbolic constants for op fields for Neon 3-register same-length.
4248 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4251 #define NEON_3R_VHADD 0
4252 #define NEON_3R_VQADD 1
4253 #define NEON_3R_VRHADD 2
4254 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4255 #define NEON_3R_VHSUB 4
4256 #define NEON_3R_VQSUB 5
4257 #define NEON_3R_VCGT 6
4258 #define NEON_3R_VCGE 7
4259 #define NEON_3R_VSHL 8
4260 #define NEON_3R_VQSHL 9
4261 #define NEON_3R_VRSHL 10
4262 #define NEON_3R_VQRSHL 11
4263 #define NEON_3R_VMAX 12
4264 #define NEON_3R_VMIN 13
4265 #define NEON_3R_VABD 14
4266 #define NEON_3R_VABA 15
4267 #define NEON_3R_VADD_VSUB 16
4268 #define NEON_3R_VTST_VCEQ 17
4269 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4270 #define NEON_3R_VMUL 19
4271 #define NEON_3R_VPMAX 20
4272 #define NEON_3R_VPMIN 21
4273 #define NEON_3R_VQDMULH_VQRDMULH 22
4274 #define NEON_3R_VPADD 23
4275 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4276 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4277 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4278 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4279 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4280 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4315 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4316 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4319 #define NEON_2RM_VREV64 0
4320 #define NEON_2RM_VREV32 1
4321 #define NEON_2RM_VREV16 2
4322 #define NEON_2RM_VPADDL 4
4323 #define NEON_2RM_VPADDL_U 5
4324 #define NEON_2RM_VCLS 8
4325 #define NEON_2RM_VCLZ 9
4326 #define NEON_2RM_VCNT 10
4327 #define NEON_2RM_VMVN 11
4328 #define NEON_2RM_VPADAL 12
4329 #define NEON_2RM_VPADAL_U 13
4330 #define NEON_2RM_VQABS 14
4331 #define NEON_2RM_VQNEG 15
4332 #define NEON_2RM_VCGT0 16
4333 #define NEON_2RM_VCGE0 17
4334 #define NEON_2RM_VCEQ0 18
4335 #define NEON_2RM_VCLE0 19
4336 #define NEON_2RM_VCLT0 20
4337 #define NEON_2RM_VABS 22
4338 #define NEON_2RM_VNEG 23
4339 #define NEON_2RM_VCGT0_F 24
4340 #define NEON_2RM_VCGE0_F 25
4341 #define NEON_2RM_VCEQ0_F 26
4342 #define NEON_2RM_VCLE0_F 27
4343 #define NEON_2RM_VCLT0_F 28
4344 #define NEON_2RM_VABS_F 30
4345 #define NEON_2RM_VNEG_F 31
4346 #define NEON_2RM_VSWP 32
4347 #define NEON_2RM_VTRN 33
4348 #define NEON_2RM_VUZP 34
4349 #define NEON_2RM_VZIP 35
4350 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4351 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4352 #define NEON_2RM_VSHLL 38
4353 #define NEON_2RM_VCVT_F16_F32 44
4354 #define NEON_2RM_VCVT_F32_F16 46
4355 #define NEON_2RM_VRECPE 56
4356 #define NEON_2RM_VRSQRTE 57
4357 #define NEON_2RM_VRECPE_F 58
4358 #define NEON_2RM_VRSQRTE_F 59
4359 #define NEON_2RM_VCVT_FS 60
4360 #define NEON_2RM_VCVT_FU 61
4361 #define NEON_2RM_VCVT_SF 62
4362 #define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
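/* For example 0x7 permits element sizes 0-2 (8, 16 and 32-bit elements),
 * 0x4 permits only size 2 and 0x1 only size 0.
 */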
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op, q, u, size, rd, rn, rm, pass, count, pairwise, shift;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
4452 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4453 /* Catch invalid op and bad size combinations: UNDEF */
4454 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4457 /* All insns of this form UNDEF for either this condition or the
4458 * superset of cases "Q==1"; we catch the latter later.
4460 if (q
&& ((rd
| rn
| rm
) & 1)) {
4463 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
4464 /* 64-bit element instructions. */
4465 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4466 neon_load_reg64(cpu_V0
, rn
+ pass
);
4467 neon_load_reg64(cpu_V1
, rm
+ pass
);
4471 gen_helper_neon_qadd_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4473 gen_helper_neon_qadd_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4478 gen_helper_neon_qsub_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4480 gen_helper_neon_qsub_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4485 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4487 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4492 gen_helper_neon_qshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4494 gen_helper_neon_qshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4499 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4501 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4504 case NEON_3R_VQRSHL
:
4506 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4508 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4511 case NEON_3R_VADD_VSUB
:
4513 tcg_gen_sub_i64(CPU_V001
);
4515 tcg_gen_add_i64(CPU_V001
);
4521 neon_store_reg64(cpu_V0
, rd
+ pass
);
4530 case NEON_3R_VQRSHL
:
4533 /* Shift instruction operands are reversed. */
4548 case NEON_3R_FLOAT_ARITH
:
4549 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4551 case NEON_3R_FLOAT_MINMAX
:
4552 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4554 case NEON_3R_FLOAT_CMP
:
4556 /* no encoding for U=0 C=1x */
4560 case NEON_3R_FLOAT_ACMP
:
4565 case NEON_3R_VRECPS_VRSQRTS
:
4571 if (u
&& (size
!= 0)) {
4572 /* UNDEF on invalid size for polynomial subcase */
4580 if (pairwise
&& q
) {
4581 /* All the pairwise insns UNDEF if Q is set */
4585 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4590 tmp
= neon_load_reg(rn
, 0);
4591 tmp2
= neon_load_reg(rn
, 1);
4593 tmp
= neon_load_reg(rm
, 0);
4594 tmp2
= neon_load_reg(rm
, 1);
4598 tmp
= neon_load_reg(rn
, pass
);
4599 tmp2
= neon_load_reg(rm
, pass
);
4603 GEN_NEON_INTEGER_OP(hadd
);
4606 GEN_NEON_INTEGER_OP(qadd
);
4608 case NEON_3R_VRHADD
:
4609 GEN_NEON_INTEGER_OP(rhadd
);
4611 case NEON_3R_LOGIC
: /* Logic ops. */
4612 switch ((u
<< 2) | size
) {
4614 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4617 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4620 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4623 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4626 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4629 tmp3
= neon_load_reg(rd
, pass
);
4630 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4631 tcg_temp_free_i32(tmp3
);
4634 tmp3
= neon_load_reg(rd
, pass
);
4635 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4636 tcg_temp_free_i32(tmp3
);
4639 tmp3
= neon_load_reg(rd
, pass
);
4640 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4641 tcg_temp_free_i32(tmp3
);
4646 GEN_NEON_INTEGER_OP(hsub
);
4649 GEN_NEON_INTEGER_OP(qsub
);
4652 GEN_NEON_INTEGER_OP(cgt
);
4655 GEN_NEON_INTEGER_OP(cge
);
4658 GEN_NEON_INTEGER_OP(shl
);
4661 GEN_NEON_INTEGER_OP(qshl
);
4664 GEN_NEON_INTEGER_OP(rshl
);
4666 case NEON_3R_VQRSHL
:
4667 GEN_NEON_INTEGER_OP(qrshl
);
4670 GEN_NEON_INTEGER_OP(max
);
4673 GEN_NEON_INTEGER_OP(min
);
4676 GEN_NEON_INTEGER_OP(abd
);
4679 GEN_NEON_INTEGER_OP(abd
);
4680 tcg_temp_free_i32(tmp2
);
4681 tmp2
= neon_load_reg(rd
, pass
);
4682 gen_neon_add(size
, tmp
, tmp2
);
4684 case NEON_3R_VADD_VSUB
:
4685 if (!u
) { /* VADD */
4686 gen_neon_add(size
, tmp
, tmp2
);
4689 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4690 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4691 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4696 case NEON_3R_VTST_VCEQ
:
4697 if (!u
) { /* VTST */
4699 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4700 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4701 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4706 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4707 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4708 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4713 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4715 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4716 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4717 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4720 tcg_temp_free_i32(tmp2
);
4721 tmp2
= neon_load_reg(rd
, pass
);
4723 gen_neon_rsb(size
, tmp
, tmp2
);
4725 gen_neon_add(size
, tmp
, tmp2
);
4729 if (u
) { /* polynomial */
4730 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4731 } else { /* Integer */
4733 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4734 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4735 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4741 GEN_NEON_INTEGER_OP(pmax
);
4744 GEN_NEON_INTEGER_OP(pmin
);
4746 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4747 if (!u
) { /* VQDMULH */
4749 case 1: gen_helper_neon_qdmulh_s16(tmp
, tmp
, tmp2
); break;
4750 case 2: gen_helper_neon_qdmulh_s32(tmp
, tmp
, tmp2
); break;
4753 } else { /* VQRDMULH */
4755 case 1: gen_helper_neon_qrdmulh_s16(tmp
, tmp
, tmp2
); break;
4756 case 2: gen_helper_neon_qrdmulh_s32(tmp
, tmp
, tmp2
); break;
4763 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4764 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4765 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4769 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4770 switch ((u
<< 2) | size
) {
4772 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4775 gen_helper_neon_sub_f32(tmp
, tmp
, tmp2
);
4778 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4781 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
);
4787 case NEON_3R_FLOAT_MULTIPLY
:
4788 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
4790 tcg_temp_free_i32(tmp2
);
4791 tmp2
= neon_load_reg(rd
, pass
);
4793 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4795 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
4799 case NEON_3R_FLOAT_CMP
:
4801 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
4804 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
4806 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
4809 case NEON_3R_FLOAT_ACMP
:
4811 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
);
4813 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
);
4815 case NEON_3R_FLOAT_MINMAX
:
4817 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
);
4819 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
);
4821 case NEON_3R_VRECPS_VRSQRTS
:
4823 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4825 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4830 tcg_temp_free_i32(tmp2
);
4832 /* Save the result. For elementwise operations we can put it
4833 straight into the destination register. For pairwise operations
4834 we have to be careful to avoid clobbering the source operands. */
4835 if (pairwise
&& rd
== rm
) {
4836 neon_store_scratch(pass
, tmp
);
4838 neon_store_reg(rd
, pass
, tmp
);
4842 if (pairwise
&& rd
== rm
) {
4843 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4844 tmp
= neon_load_scratch(pass
);
4845 neon_store_reg(rd
, pass
, tmp
);
4848 /* End of 3 register same size operations. */
4849 } else if (insn
& (1 << 4)) {
4850 if ((insn
& 0x00380080) != 0) {
4851 /* Two registers and shift. */
4852 op
= (insn
>> 8) & 0xf;
4853 if (insn
& (1 << 7)) {
4861 while ((insn
& (1 << (size
+ 19))) == 0)
4864 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4865 /* To avoid excessive dumplication of ops we implement shift
4866 by immediate using the variable shift operations. */
4868 /* Shift by immediate:
4869 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4870 if (q
&& ((rd
| rm
) & 1)) {
4873 if (!u
&& (op
== 4 || op
== 6)) {
4876 /* Right shifts are encoded as N - shift, where N is the
4877 element size in bits. */
4879 shift
= shift
- (1 << (size
+ 3));
4887 imm
= (uint8_t) shift
;
4892 imm
= (uint16_t) shift
;
4903 for (pass
= 0; pass
< count
; pass
++) {
4905 neon_load_reg64(cpu_V0
, rm
+ pass
);
4906 tcg_gen_movi_i64(cpu_V1
, imm
);
4911 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4913 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4918 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4920 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4923 case 5: /* VSHL, VSLI */
4924 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4926 case 6: /* VQSHLU */
4927 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4931 gen_helper_neon_qshl_u64(cpu_V0
,
4934 gen_helper_neon_qshl_s64(cpu_V0
,
4939 if (op
== 1 || op
== 3) {
4941 neon_load_reg64(cpu_V1
, rd
+ pass
);
4942 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4943 } else if (op
== 4 || (op
== 5 && u
)) {
4945 neon_load_reg64(cpu_V1
, rd
+ pass
);
4947 if (shift
< -63 || shift
> 63) {
4951 mask
= 0xffffffffffffffffull
>> -shift
;
4953 mask
= 0xffffffffffffffffull
<< shift
;
4956 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
4957 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4959 neon_store_reg64(cpu_V0
, rd
+ pass
);
4960 } else { /* size < 3 */
4961 /* Operands in T0 and T1. */
4962 tmp
= neon_load_reg(rm
, pass
);
4963 tmp2
= tcg_temp_new_i32();
4964 tcg_gen_movi_i32(tmp2
, imm
);
4968 GEN_NEON_INTEGER_OP(shl
);
4972 GEN_NEON_INTEGER_OP(rshl
);
4975 case 5: /* VSHL, VSLI */
4977 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
4978 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
4979 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
4983 case 6: /* VQSHLU */
4986 gen_helper_neon_qshlu_s8(tmp
, tmp
, tmp2
);
4989 gen_helper_neon_qshlu_s16(tmp
, tmp
, tmp2
);
4992 gen_helper_neon_qshlu_s32(tmp
, tmp
, tmp2
);
4999 GEN_NEON_INTEGER_OP(qshl
);
5002 tcg_temp_free_i32(tmp2
);
5004 if (op
== 1 || op
== 3) {
5006 tmp2
= neon_load_reg(rd
, pass
);
5007 gen_neon_add(size
, tmp
, tmp2
);
5008 tcg_temp_free_i32(tmp2
);
5009 } else if (op
== 4 || (op
== 5 && u
)) {
5014 mask
= 0xff >> -shift
;
5016 mask
= (uint8_t)(0xff << shift
);
5022 mask
= 0xffff >> -shift
;
5024 mask
= (uint16_t)(0xffff << shift
);
5028 if (shift
< -31 || shift
> 31) {
5032 mask
= 0xffffffffu
>> -shift
;
5034 mask
= 0xffffffffu
<< shift
;
5040 tmp2
= neon_load_reg(rd
, pass
);
5041 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5042 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5043 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5044 tcg_temp_free_i32(tmp2
);
5046 neon_store_reg(rd
, pass
, tmp
);
5049 } else if (op
< 10) {
5050 /* Shift by immediate and narrow:
5051 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5052 int input_unsigned
= (op
== 8) ? !u
: u
;
5056 shift
= shift
- (1 << (size
+ 3));
5059 tmp64
= tcg_const_i64(shift
);
5060 neon_load_reg64(cpu_V0
, rm
);
5061 neon_load_reg64(cpu_V1
, rm
+ 1);
5062 for (pass
= 0; pass
< 2; pass
++) {
5070 if (input_unsigned
) {
5071 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5073 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5076 if (input_unsigned
) {
5077 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5079 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5082 tmp
= tcg_temp_new_i32();
5083 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5084 neon_store_reg(rd
, pass
, tmp
);
5086 tcg_temp_free_i64(tmp64
);
5089 imm
= (uint16_t)shift
;
5093 imm
= (uint32_t)shift
;
5095 tmp2
= tcg_const_i32(imm
);
5096 tmp4
= neon_load_reg(rm
+ 1, 0);
5097 tmp5
= neon_load_reg(rm
+ 1, 1);
5098 for (pass
= 0; pass
< 2; pass
++) {
5100 tmp
= neon_load_reg(rm
, 0);
5104 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5107 tmp3
= neon_load_reg(rm
, 1);
5111 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5113 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5114 tcg_temp_free_i32(tmp
);
5115 tcg_temp_free_i32(tmp3
);
5116 tmp
= tcg_temp_new_i32();
5117 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5118 neon_store_reg(rd
, pass
, tmp
);
5120 tcg_temp_free_i32(tmp2
);
5122 } else if (op
== 10) {
5124 if (q
|| (rd
& 1)) {
5127 tmp
= neon_load_reg(rm
, 0);
5128 tmp2
= neon_load_reg(rm
, 1);
5129 for (pass
= 0; pass
< 2; pass
++) {
5133 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5136 /* The shift is less than the width of the source
5137 type, so we can just shift the whole register. */
5138 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5139 /* Widen the result of shift: we need to clear
5140 * the potential overflow bits resulting from
5141 * left bits of the narrow input appearing as
5142 * right bits of left the neighbour narrow
5144 if (size
< 2 || !u
) {
5147 imm
= (0xffu
>> (8 - shift
));
5149 } else if (size
== 1) {
5150 imm
= 0xffff >> (16 - shift
);
5153 imm
= 0xffffffff >> (32 - shift
);
5156 imm64
= imm
| (((uint64_t)imm
) << 32);
5160 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5163 neon_store_reg64(cpu_V0
, rd
+ pass
);
5165 } else if (op
>= 14) {
5166 /* VCVT fixed-point. */
5167 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5170 /* We have already masked out the must-be-1 top bit of imm6,
5171 * hence this 32-shift where the ARM ARM has 64-imm6.
5174 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5175 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5178 gen_vfp_ulto(0, shift
);
5180 gen_vfp_slto(0, shift
);
5183 gen_vfp_toul(0, shift
);
5185 gen_vfp_tosl(0, shift
);
5187 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
5192 } else { /* (insn & 0x00380080) == 0 */
5194 if (q
&& (rd
& 1)) {
5198 op
= (insn
>> 8) & 0xf;
5199 /* One register and immediate. */
5200 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5201 invert
= (insn
& (1 << 5)) != 0;
5202 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5203 * We choose to not special-case this and will behave as if a
5204 * valid constant encoding of 0 had been given.
5223 imm
= (imm
<< 8) | (imm
<< 24);
5226 imm
= (imm
<< 8) | 0xff;
5229 imm
= (imm
<< 16) | 0xffff;
5232 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5240 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5241 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5247 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5248 if (op
& 1 && op
< 12) {
5249 tmp
= neon_load_reg(rd
, pass
);
5251 /* The immediate value has already been inverted, so
5253 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5255 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5259 tmp
= tcg_temp_new_i32();
5260 if (op
== 14 && invert
) {
5264 for (n
= 0; n
< 4; n
++) {
5265 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5266 val
|= 0xff << (n
* 8);
5268 tcg_gen_movi_i32(tmp
, val
);
5270 tcg_gen_movi_i32(tmp
, imm
);
5273 neon_store_reg(rd
, pass
, tmp
);
5276 } else { /* (insn & 0x00800010 == 0x00800000) */
5278 op
= (insn
>> 8) & 0xf;
5279 if ((insn
& (1 << 6)) == 0) {
5280 /* Three registers of different lengths. */
5284 /* undefreq: bit 0 : UNDEF if size != 0
5285 * bit 1 : UNDEF if size == 0
5286 * bit 2 : UNDEF if U == 1
5287 * Note that [1:0] set implies 'always UNDEF'
5290 /* prewiden, src1_wide, src2_wide, undefreq */
5291 static const int neon_3reg_wide
[16][4] = {
5292 {1, 0, 0, 0}, /* VADDL */
5293 {1, 1, 0, 0}, /* VADDW */
5294 {1, 0, 0, 0}, /* VSUBL */
5295 {1, 1, 0, 0}, /* VSUBW */
5296 {0, 1, 1, 0}, /* VADDHN */
5297 {0, 0, 0, 0}, /* VABAL */
5298 {0, 1, 1, 0}, /* VSUBHN */
5299 {0, 0, 0, 0}, /* VABDL */
5300 {0, 0, 0, 0}, /* VMLAL */
5301 {0, 0, 0, 6}, /* VQDMLAL */
5302 {0, 0, 0, 0}, /* VMLSL */
5303 {0, 0, 0, 6}, /* VQDMLSL */
5304 {0, 0, 0, 0}, /* Integer VMULL */
5305 {0, 0, 0, 2}, /* VQDMULL */
5306 {0, 0, 0, 5}, /* Polynomial VMULL */
5307 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5310 prewiden
= neon_3reg_wide
[op
][0];
5311 src1_wide
= neon_3reg_wide
[op
][1];
5312 src2_wide
= neon_3reg_wide
[op
][2];
5313 undefreq
= neon_3reg_wide
[op
][3];
5315 if (((undefreq
& 1) && (size
!= 0)) ||
5316 ((undefreq
& 2) && (size
== 0)) ||
5317 ((undefreq
& 4) && u
)) {
5320 if ((src1_wide
&& (rn
& 1)) ||
5321 (src2_wide
&& (rm
& 1)) ||
5322 (!src2_wide
&& (rd
& 1))) {
5326 /* Avoid overlapping operands. Wide source operands are
5327 always aligned so will never overlap with wide
5328 destinations in problematic ways. */
5329 if (rd
== rm
&& !src2_wide
) {
5330 tmp
= neon_load_reg(rm
, 1);
5331 neon_store_scratch(2, tmp
);
5332 } else if (rd
== rn
&& !src1_wide
) {
5333 tmp
= neon_load_reg(rn
, 1);
5334 neon_store_scratch(2, tmp
);
5337 for (pass
= 0; pass
< 2; pass
++) {
5339 neon_load_reg64(cpu_V0
, rn
+ pass
);
5342 if (pass
== 1 && rd
== rn
) {
5343 tmp
= neon_load_scratch(2);
5345 tmp
= neon_load_reg(rn
, pass
);
5348 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5352 neon_load_reg64(cpu_V1
, rm
+ pass
);
5355 if (pass
== 1 && rd
== rm
) {
5356 tmp2
= neon_load_scratch(2);
5358 tmp2
= neon_load_reg(rm
, pass
);
5361 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5365 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5366 gen_neon_addl(size
);
5368 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5369 gen_neon_subl(size
);
5371 case 5: case 7: /* VABAL, VABDL */
5372 switch ((size
<< 1) | u
) {
5374 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5377 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5380 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5383 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5386 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5389 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5393 tcg_temp_free_i32(tmp2
);
5394 tcg_temp_free_i32(tmp
);
5396 case 8: case 9: case 10: case 11: case 12: case 13:
5397 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5398 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5400 case 14: /* Polynomial VMULL */
5401 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5402 tcg_temp_free_i32(tmp2
);
5403 tcg_temp_free_i32(tmp
);
5405 default: /* 15 is RESERVED: caught earlier */
5410 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5411 neon_store_reg64(cpu_V0
, rd
+ pass
);
5412 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5414 neon_load_reg64(cpu_V1
, rd
+ pass
);
5416 case 10: /* VMLSL */
5417 gen_neon_negl(cpu_V0
, size
);
5419 case 5: case 8: /* VABAL, VMLAL */
5420 gen_neon_addl(size
);
5422 case 9: case 11: /* VQDMLAL, VQDMLSL */
5423 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5425 gen_neon_negl(cpu_V0
, size
);
5427 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5432 neon_store_reg64(cpu_V0
, rd
+ pass
);
5433 } else if (op
== 4 || op
== 6) {
5434 /* Narrowing operation. */
5435 tmp
= tcg_temp_new_i32();
5439 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5442 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5445 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5446 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5453 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5456 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5459 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5460 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5461 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5469 neon_store_reg(rd
, 0, tmp3
);
5470 neon_store_reg(rd
, 1, tmp
);
5473 /* Write back the result. */
5474 neon_store_reg64(cpu_V0
, rd
+ pass
);
            /* Two registers and a scalar. NB that for ops of this form
             * the ARM ARM labels bit 24 as Q, but it is in our variable
             * 'u', not 'q'.
             */
5486 case 1: /* Float VMLA scalar */
5487 case 5: /* Floating point VMLS scalar */
5488 case 9: /* Floating point VMUL scalar */
5493 case 0: /* Integer VMLA scalar */
5494 case 4: /* Integer VMLS scalar */
5495 case 8: /* Integer VMUL scalar */
5496 case 12: /* VQDMULH scalar */
5497 case 13: /* VQRDMULH scalar */
5498 if (u
&& ((rd
| rn
) & 1)) {
5501 tmp
= neon_get_scalar(size
, rm
);
5502 neon_store_scratch(0, tmp
);
5503 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5504 tmp
= neon_load_scratch(0);
5505 tmp2
= neon_load_reg(rn
, pass
);
5508 gen_helper_neon_qdmulh_s16(tmp
, tmp
, tmp2
);
5510 gen_helper_neon_qdmulh_s32(tmp
, tmp
, tmp2
);
5512 } else if (op
== 13) {
5514 gen_helper_neon_qrdmulh_s16(tmp
, tmp
, tmp2
);
5516 gen_helper_neon_qrdmulh_s32(tmp
, tmp
, tmp2
);
5518 } else if (op
& 1) {
5519 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
5522 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5523 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5524 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5528 tcg_temp_free_i32(tmp2
);
5531 tmp2
= neon_load_reg(rd
, pass
);
5534 gen_neon_add(size
, tmp
, tmp2
);
5537 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
5540 gen_neon_rsb(size
, tmp
, tmp2
);
5543 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
5548 tcg_temp_free_i32(tmp2
);
5550 neon_store_reg(rd
, pass
, tmp
);
5553 case 3: /* VQDMLAL scalar */
5554 case 7: /* VQDMLSL scalar */
5555 case 11: /* VQDMULL scalar */
            case 2: /* VMLAL scalar */
5561 case 6: /* VMLSL scalar */
5562 case 10: /* VMULL scalar */
5566 tmp2
= neon_get_scalar(size
, rm
);
5567 /* We need a copy of tmp2 because gen_neon_mull
5568 * deletes it during pass 0. */
5569 tmp4
= tcg_temp_new_i32();
5570 tcg_gen_mov_i32(tmp4
, tmp2
);
5571 tmp3
= neon_load_reg(rn
, 1);
5573 for (pass
= 0; pass
< 2; pass
++) {
5575 tmp
= neon_load_reg(rn
, 0);
5580 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5582 neon_load_reg64(cpu_V1
, rd
+ pass
);
5586 gen_neon_negl(cpu_V0
, size
);
5589 gen_neon_addl(size
);
5592 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5594 gen_neon_negl(cpu_V0
, size
);
5596 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5602 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5607 neon_store_reg64(cpu_V0
, rd
+ pass
);
5612 default: /* 14 and 15 are RESERVED */
5616 } else { /* size == 3 */
5619 imm
= (insn
>> 8) & 0xf;
5624 if (q
&& ((rd
| rn
| rm
) & 1)) {
5629 neon_load_reg64(cpu_V0
, rn
);
5631 neon_load_reg64(cpu_V1
, rn
+ 1);
5633 } else if (imm
== 8) {
5634 neon_load_reg64(cpu_V0
, rn
+ 1);
5636 neon_load_reg64(cpu_V1
, rm
);
5639 tmp64
= tcg_temp_new_i64();
5641 neon_load_reg64(cpu_V0
, rn
);
5642 neon_load_reg64(tmp64
, rn
+ 1);
5644 neon_load_reg64(cpu_V0
, rn
+ 1);
5645 neon_load_reg64(tmp64
, rm
);
5647 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5648 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5649 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5651 neon_load_reg64(cpu_V1
, rm
);
5653 neon_load_reg64(cpu_V1
, rm
+ 1);
5656 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5657 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5658 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5659 tcg_temp_free_i64(tmp64
);
5662 neon_load_reg64(cpu_V0
, rn
);
5663 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5664 neon_load_reg64(cpu_V1
, rm
);
5665 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5666 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5668 neon_store_reg64(cpu_V0
, rd
);
5670 neon_store_reg64(cpu_V1
, rd
+ 1);
5672 } else if ((insn
& (1 << 11)) == 0) {
5673 /* Two register misc. */
5674 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5675 size
= (insn
>> 18) & 3;
5676 /* UNDEF for unknown op values and bad op-size combinations */
5677 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
5680 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
5681 q
&& ((rm
| rd
) & 1)) {
5685 case NEON_2RM_VREV64
:
5686 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5687 tmp
= neon_load_reg(rm
, pass
* 2);
5688 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5690 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5691 case 1: gen_swap_half(tmp
); break;
5692 case 2: /* no-op */ break;
5695 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5697 neon_store_reg(rd
, pass
* 2, tmp2
);
5700 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5701 case 1: gen_swap_half(tmp2
); break;
5704 neon_store_reg(rd
, pass
* 2, tmp2
);
5708 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
5709 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
5710 for (pass
= 0; pass
< q
+ 1; pass
++) {
5711 tmp
= neon_load_reg(rm
, pass
* 2);
5712 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5713 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5714 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5716 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5717 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5718 case 2: tcg_gen_add_i64(CPU_V001
); break;
5721 if (op
>= NEON_2RM_VPADAL
) {
5723 neon_load_reg64(cpu_V1
, rd
+ pass
);
5724 gen_neon_addl(size
);
5726 neon_store_reg64(cpu_V0
, rd
+ pass
);
5732 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5733 tmp
= neon_load_reg(rm
, n
);
5734 tmp2
= neon_load_reg(rd
, n
+ 1);
5735 neon_store_reg(rm
, n
, tmp2
);
5736 neon_store_reg(rd
, n
+ 1, tmp
);
5743 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5748 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5752 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
5753 /* also VQMOVUN; op field and mnemonics don't line up */
5758 for (pass
= 0; pass
< 2; pass
++) {
5759 neon_load_reg64(cpu_V0
, rm
+ pass
);
5760 tmp
= tcg_temp_new_i32();
5761 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
5766 neon_store_reg(rd
, 0, tmp2
);
5767 neon_store_reg(rd
, 1, tmp
);
5771 case NEON_2RM_VSHLL
:
5772 if (q
|| (rd
& 1)) {
5775 tmp
= neon_load_reg(rm
, 0);
5776 tmp2
= neon_load_reg(rm
, 1);
5777 for (pass
= 0; pass
< 2; pass
++) {
5780 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5781 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5782 neon_store_reg64(cpu_V0
, rd
+ pass
);
5785 case NEON_2RM_VCVT_F16_F32
:
5786 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5790 tmp
= tcg_temp_new_i32();
5791 tmp2
= tcg_temp_new_i32();
5792 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5793 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5794 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5795 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5796 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5797 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5798 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5799 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5800 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5801 neon_store_reg(rd
, 0, tmp2
);
5802 tmp2
= tcg_temp_new_i32();
5803 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5804 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5805 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5806 neon_store_reg(rd
, 1, tmp2
);
5807 tcg_temp_free_i32(tmp
);
5809 case NEON_2RM_VCVT_F32_F16
:
5810 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5814 tmp3
= tcg_temp_new_i32();
5815 tmp
= neon_load_reg(rm
, 0);
5816 tmp2
= neon_load_reg(rm
, 1);
5817 tcg_gen_ext16u_i32(tmp3
, tmp
);
5818 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5819 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5820 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5821 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5822 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5823 tcg_temp_free_i32(tmp
);
5824 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5825 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5826 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5827 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5828 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5829 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5830 tcg_temp_free_i32(tmp2
);
5831 tcg_temp_free_i32(tmp3
);
5835 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5836 if (neon_2rm_is_float_op(op
)) {
5837 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5838 neon_reg_offset(rm
, pass
));
5841 tmp
= neon_load_reg(rm
, pass
);
5844 case NEON_2RM_VREV32
:
5846 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5847 case 1: gen_swap_half(tmp
); break;
5851 case NEON_2RM_VREV16
:
5856 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5857 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5858 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5864 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5865 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5866 case 2: gen_helper_clz(tmp
, tmp
); break;
5871 gen_helper_neon_cnt_u8(tmp
, tmp
);
5874 tcg_gen_not_i32(tmp
, tmp
);
5876 case NEON_2RM_VQABS
:
5878 case 0: gen_helper_neon_qabs_s8(tmp
, tmp
); break;
5879 case 1: gen_helper_neon_qabs_s16(tmp
, tmp
); break;
5880 case 2: gen_helper_neon_qabs_s32(tmp
, tmp
); break;
5884 case NEON_2RM_VQNEG
:
5886 case 0: gen_helper_neon_qneg_s8(tmp
, tmp
); break;
5887 case 1: gen_helper_neon_qneg_s16(tmp
, tmp
); break;
5888 case 2: gen_helper_neon_qneg_s32(tmp
, tmp
); break;
5892 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
5893 tmp2
= tcg_const_i32(0);
5895 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5896 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5897 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5900 tcg_temp_free(tmp2
);
5901 if (op
== NEON_2RM_VCLE0
) {
5902 tcg_gen_not_i32(tmp
, tmp
);
5905 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
5906 tmp2
= tcg_const_i32(0);
5908 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5909 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5910 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5913 tcg_temp_free(tmp2
);
5914 if (op
== NEON_2RM_VCLT0
) {
5915 tcg_gen_not_i32(tmp
, tmp
);
5918 case NEON_2RM_VCEQ0
:
5919 tmp2
= tcg_const_i32(0);
5921 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5922 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5923 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5926 tcg_temp_free(tmp2
);
5930 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5931 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5932 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5937 tmp2
= tcg_const_i32(0);
5938 gen_neon_rsb(size
, tmp
, tmp2
);
5939 tcg_temp_free(tmp2
);
5941 case NEON_2RM_VCGT0_F
:
5942 tmp2
= tcg_const_i32(0);
5943 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
5944 tcg_temp_free(tmp2
);
5946 case NEON_2RM_VCGE0_F
:
5947 tmp2
= tcg_const_i32(0);
5948 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
5949 tcg_temp_free(tmp2
);
5951 case NEON_2RM_VCEQ0_F
:
5952 tmp2
= tcg_const_i32(0);
5953 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
5954 tcg_temp_free(tmp2
);
5956 case NEON_2RM_VCLE0_F
:
5957 tmp2
= tcg_const_i32(0);
5958 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
);
5959 tcg_temp_free(tmp2
);
5961 case NEON_2RM_VCLT0_F
:
5962 tmp2
= tcg_const_i32(0);
5963 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
);
5964 tcg_temp_free(tmp2
);
5966 case NEON_2RM_VABS_F
:
5969 case NEON_2RM_VNEG_F
:
5973 tmp2
= neon_load_reg(rd
, pass
);
5974 neon_store_reg(rm
, pass
, tmp2
);
5977 tmp2
= neon_load_reg(rd
, pass
);
5979 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
5980 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
5983 neon_store_reg(rm
, pass
, tmp2
);
5985 case NEON_2RM_VRECPE
:
5986 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
5988 case NEON_2RM_VRSQRTE
:
5989 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
5991 case NEON_2RM_VRECPE_F
:
5992 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5994 case NEON_2RM_VRSQRTE_F
:
5995 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5997 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6000 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6003 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6006 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6010 /* Reserved op values were caught by the
6011 * neon_2rm_sizes[] check earlier.
6015 if (neon_2rm_is_float_op(op
)) {
6016 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6017 neon_reg_offset(rd
, pass
));
6019 neon_store_reg(rd
, pass
, tmp
);
6024 } else if ((insn
& (1 << 10)) == 0) {
6026 int n
= ((insn
>> 8) & 3) + 1;
6027 if ((rn
+ n
) > 32) {
            /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
             * helper function running off the end of the register file.
             */
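            /* n, computed above as ((insn >> 8) & 3) + 1, is the length of
             * the VTBL/VTBX register list (1 to 4 registers starting at rn);
             * rn + n > 32 would mean the list runs past d31.
             */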
6034 if (insn
& (1 << 6)) {
6035 tmp
= neon_load_reg(rd
, 0);
6037 tmp
= tcg_temp_new_i32();
6038 tcg_gen_movi_i32(tmp
, 0);
6040 tmp2
= neon_load_reg(rm
, 0);
6041 tmp4
= tcg_const_i32(rn
);
6042 tmp5
= tcg_const_i32(n
);
6043 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
6044 tcg_temp_free_i32(tmp
);
6045 if (insn
& (1 << 6)) {
6046 tmp
= neon_load_reg(rd
, 1);
6048 tmp
= tcg_temp_new_i32();
6049 tcg_gen_movi_i32(tmp
, 0);
6051 tmp3
= neon_load_reg(rm
, 1);
6052 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
6053 tcg_temp_free_i32(tmp5
);
6054 tcg_temp_free_i32(tmp4
);
6055 neon_store_reg(rd
, 0, tmp2
);
6056 neon_store_reg(rd
, 1, tmp3
);
6057 tcg_temp_free_i32(tmp
);
6058 } else if ((insn
& 0x380) == 0) {
6060 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6063 if (insn
& (1 << 19)) {
6064 tmp
= neon_load_reg(rm
, 1);
6066 tmp
= neon_load_reg(rm
, 0);
6068 if (insn
& (1 << 16)) {
6069 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6070 } else if (insn
& (1 << 17)) {
6071 if ((insn
>> 18) & 1)
6072 gen_neon_dup_high16(tmp
);
6074 gen_neon_dup_low16(tmp
);
6076 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6077 tmp2
= tcg_temp_new_i32();
6078 tcg_gen_mov_i32(tmp2
, tmp
);
6079 neon_store_reg(rd
, pass
, tmp2
);
6081 tcg_temp_free_i32(tmp
);
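/* cp14 (debug / ThumbEE) coprocessor accesses.  In the MRC/MCR encoding,
 * opc1 sits in bits [23:21], CRn in [19:16], Rt in [15:12], opc2 in [7:5]
 * and CRm in [3:0], which is what the two helpers below extract before
 * matching the handful of registers we implement.
 */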
static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
        switch (crm) {
        case 0:
            /* DBGDIDR: just RAZ. In particular this means the
             * "debug architecture version" bits will read as
             * a reserved value, which should cause Linux to
             * not try to use the debug hardware.
             */
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
            return 0;
        case 1:
        case 2:
            /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
             * don't implement memory mapped debug components
             */
            if (ENABLE_ARCH_7) {
                tmp = tcg_const_i32(0);
                store_reg(s, rt, tmp);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this. */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor. See if the board has hooked it. */
        return disas_cp_insn (env, s, insn);
    }
}
/* Store a 64-bit value to a register pair. Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
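/* Typical use (see the long multiply cases below):
 *   gen_storeq_reg(s, rn, rd, tmp64);
 * stores the low 32 bits of val to rlow, then shifts val right in place
 * and stores the high 32 bits to rhigh, which is why val is clobbered.
 */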
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits. */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
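/* Note the difference between the two accumulate helpers: gen_addq_lo adds
 * a single 32-bit register (zero-extended) into val and is called twice in
 * a row for UMAAL, whereas gen_addq concatenates rlow:rhigh into one 64-bit
 * value before adding, as used by the UMLAL/SMLAL patterns.
 */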
/* Set N and Z flags from a 64-bit value. */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
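/* After gen_load_exclusive, cpu_exclusive_addr and cpu_exclusive_val (plus
 * cpu_exclusive_high for LDREXD) hold the monitored address and data; a
 * later gen_store_exclusive compares against them before letting the store
 * through.
 */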
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
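/* CLREX: -1 serves as the "no outstanding reservation" marker, so any
 * following STREX will take its fail path.
 */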
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
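/* In the user-only variant above, the STREX operands are packed into
 * cpu_exclusive_info and EXCP_STREX is raised so the operation can be
 * completed outside generated code.  The system-emulation variant below
 * instead open-codes the compare-and-store with TCG branches: if the
 * address and the remembered data still match, the new value is stored and
 * Rd is set to 0; otherwise control lands on fail_label and Rd is set to 1,
 * matching the STREX status result.
 */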
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6403 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
6405 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6412 insn
= ldl_code(s
->pc
);
6415 /* M variants do not implement ARM mode. */
    /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
     * choose to UNDEF. In ARMv5 and above the space is used
     * for miscellaneous unconditional instructions.
     */
6426 /* Unconditional instructions. */
6427 if (((insn
>> 25) & 7) == 1) {
6428 /* NEON Data processing. */
6429 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6432 if (disas_neon_data_insn(env
, s
, insn
))
6436 if ((insn
& 0x0f100000) == 0x04000000) {
6437 /* NEON load/store. */
6438 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6441 if (disas_neon_ls_insn(env
, s
, insn
))
6445 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6446 ((insn
& 0x0f30f010) == 0x0710f000)) {
6447 if ((insn
& (1 << 22)) == 0) {
6449 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6453 /* Otherwise PLD; v5TE+ */
6457 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6458 ((insn
& 0x0f70f010) == 0x0650f000)) {
6460 return; /* PLI; V7 */
6462 if (((insn
& 0x0f700000) == 0x04100000) ||
6463 ((insn
& 0x0f700010) == 0x06100000)) {
6464 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6467 return; /* v7MP: Unallocated memory hint: must NOP */
6470 if ((insn
& 0x0ffffdff) == 0x01010000) {
6473 if (insn
& (1 << 9)) {
6474 /* BE8 mode not implemented. */
6478 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6479 switch ((insn
>> 4) & 0xf) {
6488 /* We don't emulate caches so these are a no-op. */
6493 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6499 op1
= (insn
& 0x1f);
6500 addr
= tcg_temp_new_i32();
6501 tmp
= tcg_const_i32(op1
);
6502 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6503 tcg_temp_free_i32(tmp
);
6504 i
= (insn
>> 23) & 3;
6506 case 0: offset
= -4; break; /* DA */
6507 case 1: offset
= 0; break; /* IA */
6508 case 2: offset
= -8; break; /* DB */
6509 case 3: offset
= 4; break; /* IB */
6513 tcg_gen_addi_i32(addr
, addr
, offset
);
6514 tmp
= load_reg(s
, 14);
6515 gen_st32(tmp
, addr
, 0);
6516 tmp
= load_cpu_field(spsr
);
6517 tcg_gen_addi_i32(addr
, addr
, 4);
6518 gen_st32(tmp
, addr
, 0);
6519 if (insn
& (1 << 21)) {
6520 /* Base writeback. */
6522 case 0: offset
= -8; break;
6523 case 1: offset
= 4; break;
6524 case 2: offset
= -4; break;
6525 case 3: offset
= 0; break;
6529 tcg_gen_addi_i32(addr
, addr
, offset
);
6530 tmp
= tcg_const_i32(op1
);
6531 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6532 tcg_temp_free_i32(tmp
);
6533 tcg_temp_free_i32(addr
);
6535 tcg_temp_free_i32(addr
);
6538 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6544 rn
= (insn
>> 16) & 0xf;
6545 addr
= load_reg(s
, rn
);
6546 i
= (insn
>> 23) & 3;
6548 case 0: offset
= -4; break; /* DA */
6549 case 1: offset
= 0; break; /* IA */
6550 case 2: offset
= -8; break; /* DB */
6551 case 3: offset
= 4; break; /* IB */
6555 tcg_gen_addi_i32(addr
, addr
, offset
);
6556 /* Load PC into tmp and CPSR into tmp2. */
6557 tmp
= gen_ld32(addr
, 0);
6558 tcg_gen_addi_i32(addr
, addr
, 4);
6559 tmp2
= gen_ld32(addr
, 0);
6560 if (insn
& (1 << 21)) {
6561 /* Base writeback. */
6563 case 0: offset
= -8; break;
6564 case 1: offset
= 4; break;
6565 case 2: offset
= -4; break;
6566 case 3: offset
= 0; break;
6570 tcg_gen_addi_i32(addr
, addr
, offset
);
6571 store_reg(s
, rn
, addr
);
6573 tcg_temp_free_i32(addr
);
6575 gen_rfe(s
, tmp
, tmp2
);
6577 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6578 /* branch link and change to thumb (blx <offset>) */
6581 val
= (uint32_t)s
->pc
;
6582 tmp
= tcg_temp_new_i32();
6583 tcg_gen_movi_i32(tmp
, val
);
6584 store_reg(s
, 14, tmp
);
6585 /* Sign-extend the 24-bit offset */
6586 offset
= (((int32_t)insn
) << 8) >> 8;
6587 /* offset * 4 + bit24 * 2 + (thumb bit) */
6588 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6589 /* pipeline offset */
6591 /* protected by ARCH(5); above, near the start of uncond block */
6594 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6595 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6596 /* iWMMXt register transfer. */
6597 if (env
->cp15
.c15_cpar
& (1 << 1))
6598 if (!disas_iwmmxt_insn(env
, s
, insn
))
6601 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6602 /* Coprocessor double register transfer. */
6604 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6605 /* Additional coprocessor register transfer. */
6606 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6609 /* cps (privileged) */
6613 if (insn
& (1 << 19)) {
6614 if (insn
& (1 << 8))
6616 if (insn
& (1 << 7))
6618 if (insn
& (1 << 6))
6620 if (insn
& (1 << 18))
6623 if (insn
& (1 << 17)) {
6625 val
|= (insn
& 0x1f);
6628 gen_set_psr_im(s
, mask
, 0, val
);
6635 /* if not always execute, we generate a conditional jump to
6637 s
->condlabel
= gen_new_label();
6638 gen_test_cc(cond
^ 1, s
->condlabel
);
6641 if ((insn
& 0x0f900000) == 0x03000000) {
6642 if ((insn
& (1 << 21)) == 0) {
6644 rd
= (insn
>> 12) & 0xf;
6645 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6646 if ((insn
& (1 << 22)) == 0) {
6648 tmp
= tcg_temp_new_i32();
6649 tcg_gen_movi_i32(tmp
, val
);
6652 tmp
= load_reg(s
, rd
);
6653 tcg_gen_ext16u_i32(tmp
, tmp
);
6654 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6656 store_reg(s
, rd
, tmp
);
6658 if (((insn
>> 12) & 0xf) != 0xf)
6660 if (((insn
>> 16) & 0xf) == 0) {
6661 gen_nop_hint(s
, insn
& 0xff);
6663 /* CPSR = immediate */
6665 shift
= ((insn
>> 8) & 0xf) * 2;
6667 val
= (val
>> shift
) | (val
<< (32 - shift
));
6668 i
= ((insn
& (1 << 22)) != 0);
6669 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6673 } else if ((insn
& 0x0f900000) == 0x01000000
6674 && (insn
& 0x00000090) != 0x00000090) {
6675 /* miscellaneous instructions */
6676 op1
= (insn
>> 21) & 3;
6677 sh
= (insn
>> 4) & 0xf;
6680 case 0x0: /* move program status register */
6683 tmp
= load_reg(s
, rm
);
6684 i
= ((op1
& 2) != 0);
6685 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6689 rd
= (insn
>> 12) & 0xf;
6693 tmp
= load_cpu_field(spsr
);
6695 tmp
= tcg_temp_new_i32();
6696 gen_helper_cpsr_read(tmp
);
6698 store_reg(s
, rd
, tmp
);
6703 /* branch/exchange thumb (bx). */
6705 tmp
= load_reg(s
, rm
);
6707 } else if (op1
== 3) {
6710 rd
= (insn
>> 12) & 0xf;
6711 tmp
= load_reg(s
, rm
);
6712 gen_helper_clz(tmp
, tmp
);
6713 store_reg(s
, rd
, tmp
);
6721 /* Trivial implementation equivalent to bx. */
6722 tmp
= load_reg(s
, rm
);
6733 /* branch link/exchange thumb (blx) */
6734 tmp
= load_reg(s
, rm
);
6735 tmp2
= tcg_temp_new_i32();
6736 tcg_gen_movi_i32(tmp2
, s
->pc
);
6737 store_reg(s
, 14, tmp2
);
6740 case 0x5: /* saturating add/subtract */
6742 rd
= (insn
>> 12) & 0xf;
6743 rn
= (insn
>> 16) & 0xf;
6744 tmp
= load_reg(s
, rm
);
6745 tmp2
= load_reg(s
, rn
);
6747 gen_helper_double_saturate(tmp2
, tmp2
);
6749 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6751 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6752 tcg_temp_free_i32(tmp2
);
6753 store_reg(s
, rd
, tmp
);
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
6764 gen_exception_insn(s
, 4, EXCP_BKPT
);
6766 case 0x8: /* signed multiply */
6771 rs
= (insn
>> 8) & 0xf;
6772 rn
= (insn
>> 12) & 0xf;
6773 rd
= (insn
>> 16) & 0xf;
6775 /* (32 * 16) >> 16 */
6776 tmp
= load_reg(s
, rm
);
6777 tmp2
= load_reg(s
, rs
);
6779 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6782 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6783 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6784 tmp
= tcg_temp_new_i32();
6785 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6786 tcg_temp_free_i64(tmp64
);
6787 if ((sh
& 2) == 0) {
6788 tmp2
= load_reg(s
, rn
);
6789 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6790 tcg_temp_free_i32(tmp2
);
6792 store_reg(s
, rd
, tmp
);
6795 tmp
= load_reg(s
, rm
);
6796 tmp2
= load_reg(s
, rs
);
6797 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6798 tcg_temp_free_i32(tmp2
);
6800 tmp64
= tcg_temp_new_i64();
6801 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6802 tcg_temp_free_i32(tmp
);
6803 gen_addq(s
, tmp64
, rn
, rd
);
6804 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6805 tcg_temp_free_i64(tmp64
);
6808 tmp2
= load_reg(s
, rn
);
6809 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6810 tcg_temp_free_i32(tmp2
);
6812 store_reg(s
, rd
, tmp
);
6819 } else if (((insn
& 0x0e000000) == 0 &&
6820 (insn
& 0x00000090) != 0x90) ||
6821 ((insn
& 0x0e000000) == (1 << 25))) {
6822 int set_cc
, logic_cc
, shiftop
;
6824 op1
= (insn
>> 21) & 0xf;
6825 set_cc
= (insn
>> 20) & 1;
6826 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6828 /* data processing instruction */
6829 if (insn
& (1 << 25)) {
6830 /* immediate operand */
6832 shift
= ((insn
>> 8) & 0xf) * 2;
6834 val
= (val
>> shift
) | (val
<< (32 - shift
));
6836 tmp2
= tcg_temp_new_i32();
6837 tcg_gen_movi_i32(tmp2
, val
);
6838 if (logic_cc
&& shift
) {
6839 gen_set_CF_bit31(tmp2
);
6844 tmp2
= load_reg(s
, rm
);
6845 shiftop
= (insn
>> 5) & 3;
6846 if (!(insn
& (1 << 4))) {
6847 shift
= (insn
>> 7) & 0x1f;
6848 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6850 rs
= (insn
>> 8) & 0xf;
6851 tmp
= load_reg(s
, rs
);
6852 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6855 if (op1
!= 0x0f && op1
!= 0x0d) {
6856 rn
= (insn
>> 16) & 0xf;
6857 tmp
= load_reg(s
, rn
);
6861 rd
= (insn
>> 12) & 0xf;
6864 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6868 store_reg_bx(env
, s
, rd
, tmp
);
6871 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6875 store_reg_bx(env
, s
, rd
, tmp
);
6878 if (set_cc
&& rd
== 15) {
6879 /* SUBS r15, ... is used for exception return. */
6883 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6884 gen_exception_return(s
, tmp
);
6887 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6889 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6891 store_reg_bx(env
, s
, rd
, tmp
);
6896 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6898 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6900 store_reg_bx(env
, s
, rd
, tmp
);
6904 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6906 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6908 store_reg_bx(env
, s
, rd
, tmp
);
6912 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6914 gen_add_carry(tmp
, tmp
, tmp2
);
6916 store_reg_bx(env
, s
, rd
, tmp
);
6920 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6922 gen_sub_carry(tmp
, tmp
, tmp2
);
6924 store_reg_bx(env
, s
, rd
, tmp
);
6928 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6930 gen_sub_carry(tmp
, tmp2
, tmp
);
6932 store_reg_bx(env
, s
, rd
, tmp
);
6936 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6939 tcg_temp_free_i32(tmp
);
6943 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6946 tcg_temp_free_i32(tmp
);
6950 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6952 tcg_temp_free_i32(tmp
);
6956 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6958 tcg_temp_free_i32(tmp
);
6961 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6965 store_reg_bx(env
, s
, rd
, tmp
);
6968 if (logic_cc
&& rd
== 15) {
6969 /* MOVS r15, ... is used for exception return. */
6973 gen_exception_return(s
, tmp2
);
6978 store_reg_bx(env
, s
, rd
, tmp2
);
6982 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6986 store_reg_bx(env
, s
, rd
, tmp
);
6990 tcg_gen_not_i32(tmp2
, tmp2
);
6994 store_reg_bx(env
, s
, rd
, tmp2
);
6997 if (op1
!= 0x0f && op1
!= 0x0d) {
6998 tcg_temp_free_i32(tmp2
);
7001 /* other instructions */
7002 op1
= (insn
>> 24) & 0xf;
7006 /* multiplies, extra load/stores */
7007 sh
= (insn
>> 5) & 3;
7010 rd
= (insn
>> 16) & 0xf;
7011 rn
= (insn
>> 12) & 0xf;
7012 rs
= (insn
>> 8) & 0xf;
7014 op1
= (insn
>> 20) & 0xf;
7016 case 0: case 1: case 2: case 3: case 6:
7018 tmp
= load_reg(s
, rs
);
7019 tmp2
= load_reg(s
, rm
);
7020 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7021 tcg_temp_free_i32(tmp2
);
7022 if (insn
& (1 << 22)) {
7023 /* Subtract (mls) */
7025 tmp2
= load_reg(s
, rn
);
7026 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7027 tcg_temp_free_i32(tmp2
);
7028 } else if (insn
& (1 << 21)) {
7030 tmp2
= load_reg(s
, rn
);
7031 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7032 tcg_temp_free_i32(tmp2
);
7034 if (insn
& (1 << 20))
7036 store_reg(s
, rd
, tmp
);
7039 /* 64 bit mul double accumulate (UMAAL) */
7041 tmp
= load_reg(s
, rs
);
7042 tmp2
= load_reg(s
, rm
);
7043 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7044 gen_addq_lo(s
, tmp64
, rn
);
7045 gen_addq_lo(s
, tmp64
, rd
);
7046 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7047 tcg_temp_free_i64(tmp64
);
7049 case 8: case 9: case 10: case 11:
7050 case 12: case 13: case 14: case 15:
7051 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7052 tmp
= load_reg(s
, rs
);
7053 tmp2
= load_reg(s
, rm
);
7054 if (insn
& (1 << 22)) {
7055 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7057 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7059 if (insn
& (1 << 21)) { /* mult accumulate */
7060 gen_addq(s
, tmp64
, rn
, rd
);
7062 if (insn
& (1 << 20)) {
7063 gen_logicq_cc(tmp64
);
7065 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7066 tcg_temp_free_i64(tmp64
);
7072 rn
= (insn
>> 16) & 0xf;
7073 rd
= (insn
>> 12) & 0xf;
7074 if (insn
& (1 << 23)) {
7075 /* load/store exclusive */
7076 op1
= (insn
>> 21) & 0x3;
7081 addr
= tcg_temp_local_new_i32();
7082 load_reg_var(s
, addr
, rn
);
7083 if (insn
& (1 << 20)) {
7086 gen_load_exclusive(s
, rd
, 15, addr
, 2);
7088 case 1: /* ldrexd */
7089 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
7091 case 2: /* ldrexb */
7092 gen_load_exclusive(s
, rd
, 15, addr
, 0);
7094 case 3: /* ldrexh */
7095 gen_load_exclusive(s
, rd
, 15, addr
, 1);
7104 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
7106 case 1: /* strexd */
7107 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
7109 case 2: /* strexb */
7110 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
7112 case 3: /* strexh */
7113 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
7119 tcg_temp_free(addr
);
7121 /* SWP instruction */
7124 /* ??? This is not really atomic. However we know
7125 we never have multiple CPUs running in parallel,
7126 so it is good enough. */
7127 addr
= load_reg(s
, rn
);
7128 tmp
= load_reg(s
, rm
);
7129 if (insn
& (1 << 22)) {
7130 tmp2
= gen_ld8u(addr
, IS_USER(s
));
7131 gen_st8(tmp
, addr
, IS_USER(s
));
7133 tmp2
= gen_ld32(addr
, IS_USER(s
));
7134 gen_st32(tmp
, addr
, IS_USER(s
));
7136 tcg_temp_free_i32(addr
);
7137 store_reg(s
, rd
, tmp2
);
7143 /* Misc load/store */
7144 rn
= (insn
>> 16) & 0xf;
7145 rd
= (insn
>> 12) & 0xf;
7146 addr
= load_reg(s
, rn
);
7147 if (insn
& (1 << 24))
7148 gen_add_datah_offset(s
, insn
, 0, addr
);
7150 if (insn
& (1 << 20)) {
7154 tmp
= gen_ld16u(addr
, IS_USER(s
));
7157 tmp
= gen_ld8s(addr
, IS_USER(s
));
7161 tmp
= gen_ld16s(addr
, IS_USER(s
));
7165 } else if (sh
& 2) {
7170 tmp
= load_reg(s
, rd
);
7171 gen_st32(tmp
, addr
, IS_USER(s
));
7172 tcg_gen_addi_i32(addr
, addr
, 4);
7173 tmp
= load_reg(s
, rd
+ 1);
7174 gen_st32(tmp
, addr
, IS_USER(s
));
7178 tmp
= gen_ld32(addr
, IS_USER(s
));
7179 store_reg(s
, rd
, tmp
);
7180 tcg_gen_addi_i32(addr
, addr
, 4);
7181 tmp
= gen_ld32(addr
, IS_USER(s
));
7185 address_offset
= -4;
7188 tmp
= load_reg(s
, rd
);
7189 gen_st16(tmp
, addr
, IS_USER(s
));
7192 /* Perform base writeback before the loaded value to
7193 ensure correct behavior with overlapping index registers.
                ldrd with base writeback is undefined if the
7195 destination and index registers overlap. */
7196 if (!(insn
& (1 << 24))) {
7197 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7198 store_reg(s
, rn
, addr
);
7199 } else if (insn
& (1 << 21)) {
7201 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7202 store_reg(s
, rn
, addr
);
7204 tcg_temp_free_i32(addr
);
7207 /* Complete the load. */
7208 store_reg(s
, rd
, tmp
);
7217 if (insn
& (1 << 4)) {
7219 /* Armv6 Media instructions. */
7221 rn
= (insn
>> 16) & 0xf;
7222 rd
= (insn
>> 12) & 0xf;
7223 rs
= (insn
>> 8) & 0xf;
7224 switch ((insn
>> 23) & 3) {
7225 case 0: /* Parallel add/subtract. */
7226 op1
= (insn
>> 20) & 7;
7227 tmp
= load_reg(s
, rn
);
7228 tmp2
= load_reg(s
, rm
);
7229 sh
= (insn
>> 5) & 7;
7230 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7232 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7233 tcg_temp_free_i32(tmp2
);
7234 store_reg(s
, rd
, tmp
);
7237 if ((insn
& 0x00700020) == 0) {
7238 /* Halfword pack. */
7239 tmp
= load_reg(s
, rn
);
7240 tmp2
= load_reg(s
, rm
);
7241 shift
= (insn
>> 7) & 0x1f;
7242 if (insn
& (1 << 6)) {
7246 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7247 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7248 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7252 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7253 tcg_gen_ext16u_i32(tmp
, tmp
);
7254 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7256 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7257 tcg_temp_free_i32(tmp2
);
7258 store_reg(s
, rd
, tmp
);
7259 } else if ((insn
& 0x00200020) == 0x00200000) {
7261 tmp
= load_reg(s
, rm
);
7262 shift
= (insn
>> 7) & 0x1f;
7263 if (insn
& (1 << 6)) {
7266 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7268 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7270 sh
= (insn
>> 16) & 0x1f;
7271 tmp2
= tcg_const_i32(sh
);
7272 if (insn
& (1 << 22))
7273 gen_helper_usat(tmp
, tmp
, tmp2
);
7275 gen_helper_ssat(tmp
, tmp
, tmp2
);
7276 tcg_temp_free_i32(tmp2
);
7277 store_reg(s
, rd
, tmp
);
7278 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7280 tmp
= load_reg(s
, rm
);
7281 sh
= (insn
>> 16) & 0x1f;
7282 tmp2
= tcg_const_i32(sh
);
7283 if (insn
& (1 << 22))
7284 gen_helper_usat16(tmp
, tmp
, tmp2
);
7286 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7287 tcg_temp_free_i32(tmp2
);
7288 store_reg(s
, rd
, tmp
);
7289 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7291 tmp
= load_reg(s
, rn
);
7292 tmp2
= load_reg(s
, rm
);
7293 tmp3
= tcg_temp_new_i32();
7294 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7295 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7296 tcg_temp_free_i32(tmp3
);
7297 tcg_temp_free_i32(tmp2
);
7298 store_reg(s
, rd
, tmp
);
7299 } else if ((insn
& 0x000003e0) == 0x00000060) {
7300 tmp
= load_reg(s
, rm
);
7301 shift
= (insn
>> 10) & 3;
                /* ??? In many cases it's not necessary to do a
7303 rotate, a shift is sufficient. */
7305 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7306 op1
= (insn
>> 20) & 7;
7308 case 0: gen_sxtb16(tmp
); break;
7309 case 2: gen_sxtb(tmp
); break;
7310 case 3: gen_sxth(tmp
); break;
7311 case 4: gen_uxtb16(tmp
); break;
7312 case 6: gen_uxtb(tmp
); break;
7313 case 7: gen_uxth(tmp
); break;
7314 default: goto illegal_op
;
7317 tmp2
= load_reg(s
, rn
);
7318 if ((op1
& 3) == 0) {
7319 gen_add16(tmp
, tmp2
);
7321 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7322 tcg_temp_free_i32(tmp2
);
7325 store_reg(s
, rd
, tmp
);
7326 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7328 tmp
= load_reg(s
, rm
);
7329 if (insn
& (1 << 22)) {
7330 if (insn
& (1 << 7)) {
7334 gen_helper_rbit(tmp
, tmp
);
7337 if (insn
& (1 << 7))
7340 tcg_gen_bswap32_i32(tmp
, tmp
);
7342 store_reg(s
, rd
, tmp
);
7347 case 2: /* Multiplies (Type 3). */
7348 tmp
= load_reg(s
, rm
);
7349 tmp2
= load_reg(s
, rs
);
7350 if (insn
& (1 << 20)) {
7351 /* Signed multiply most significant [accumulate].
7352 (SMMUL, SMMLA, SMMLS) */
7353 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7356 tmp
= load_reg(s
, rd
);
7357 if (insn
& (1 << 6)) {
7358 tmp64
= gen_subq_msw(tmp64
, tmp
);
7360 tmp64
= gen_addq_msw(tmp64
, tmp
);
7363 if (insn
& (1 << 5)) {
7364 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7366 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7367 tmp
= tcg_temp_new_i32();
7368 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7369 tcg_temp_free_i64(tmp64
);
7370 store_reg(s
, rn
, tmp
);
7372 if (insn
& (1 << 5))
7373 gen_swap_half(tmp2
);
7374 gen_smul_dual(tmp
, tmp2
);
7375 if (insn
& (1 << 6)) {
7376 /* This subtraction cannot overflow. */
7377 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
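                    /* Worked example: SMUAD of 0x8000 with 0x8000 in both
                     * halfwords gives 0x40000000 + 0x40000000 = 0x80000000,
                     * which is negative as a signed 32-bit result even
                     * though the unsigned sum did not wrap, so Q is set.
                     */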
7383 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7385 tcg_temp_free_i32(tmp2
);
7386 if (insn
& (1 << 22)) {
7387 /* smlald, smlsld */
7388 tmp64
= tcg_temp_new_i64();
7389 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7390 tcg_temp_free_i32(tmp
);
7391 gen_addq(s
, tmp64
, rd
, rn
);
7392 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7393 tcg_temp_free_i64(tmp64
);
7395 /* smuad, smusd, smlad, smlsd */
7398 tmp2
= load_reg(s
, rd
);
7399 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7400 tcg_temp_free_i32(tmp2
);
7402 store_reg(s
, rn
, tmp
);
7407 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7409 case 0: /* Unsigned sum of absolute differences. */
7411 tmp
= load_reg(s
, rm
);
7412 tmp2
= load_reg(s
, rs
);
7413 gen_helper_usad8(tmp
, tmp
, tmp2
);
7414 tcg_temp_free_i32(tmp2
);
7416 tmp2
= load_reg(s
, rd
);
7417 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7418 tcg_temp_free_i32(tmp2
);
7420 store_reg(s
, rn
, tmp
);
7422 case 0x20: case 0x24: case 0x28: case 0x2c:
7423 /* Bitfield insert/clear. */
7425 shift
= (insn
>> 7) & 0x1f;
7426 i
= (insn
>> 16) & 0x1f;
7429 tmp
= tcg_temp_new_i32();
7430 tcg_gen_movi_i32(tmp
, 0);
7432 tmp
= load_reg(s
, rm
);
7435 tmp2
= load_reg(s
, rd
);
7436 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7437 tcg_temp_free_i32(tmp2
);
7439 store_reg(s
, rd
, tmp
);
7441 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7442 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7444 tmp
= load_reg(s
, rm
);
7445 shift
= (insn
>> 7) & 0x1f;
7446 i
= ((insn
>> 16) & 0x1f) + 1;
7451 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7453 gen_sbfx(tmp
, shift
, i
);
7456 store_reg(s
, rd
, tmp
);
        /* Check for undefined extension instructions
         * per the ARM Bible IE:
         * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
         */
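        /* This matches the architecturally UNDEFINED encoding (the region
         * later given the UDF mnemonic); decoding it explicitly keeps it
         * from being treated as an ordinary load/store below.
         */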
7470 sh
= (0xf << 20) | (0xf << 4);
7471 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7475 /* load/store byte/word */
7476 rn
= (insn
>> 16) & 0xf;
7477 rd
= (insn
>> 12) & 0xf;
7478 tmp2
= load_reg(s
, rn
);
7479 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7480 if (insn
& (1 << 24))
7481 gen_add_data_offset(s
, insn
, tmp2
);
7482 if (insn
& (1 << 20)) {
7484 if (insn
& (1 << 22)) {
7485 tmp
= gen_ld8u(tmp2
, i
);
7487 tmp
= gen_ld32(tmp2
, i
);
7491 tmp
= load_reg(s
, rd
);
7492 if (insn
& (1 << 22))
7493 gen_st8(tmp
, tmp2
, i
);
7495 gen_st32(tmp
, tmp2
, i
);
7497 if (!(insn
& (1 << 24))) {
7498 gen_add_data_offset(s
, insn
, tmp2
);
7499 store_reg(s
, rn
, tmp2
);
7500 } else if (insn
& (1 << 21)) {
7501 store_reg(s
, rn
, tmp2
);
7503 tcg_temp_free_i32(tmp2
);
7505 if (insn
& (1 << 20)) {
7506 /* Complete the load. */
7507 store_reg_from_load(env
, s
, rd
, tmp
);
7513 int j
, n
, user
, loaded_base
;
7515 /* load/store multiple words */
7516 /* XXX: store correct base if write back */
7518 if (insn
& (1 << 22)) {
7520 goto illegal_op
; /* only usable in supervisor mode */
7522 if ((insn
& (1 << 15)) == 0)
7525 rn
= (insn
>> 16) & 0xf;
7526 addr
= load_reg(s
, rn
);
7528 /* compute total size */
7530 TCGV_UNUSED(loaded_var
);
7533 if (insn
& (1 << i
))
7536 /* XXX: test invalid n == 0 case ? */
7537 if (insn
& (1 << 23)) {
7538 if (insn
& (1 << 24)) {
7540 tcg_gen_addi_i32(addr
, addr
, 4);
7542 /* post increment */
7545 if (insn
& (1 << 24)) {
7547 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7549 /* post decrement */
7551 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7556 if (insn
& (1 << i
)) {
7557 if (insn
& (1 << 20)) {
7559 tmp
= gen_ld32(addr
, IS_USER(s
));
7561 tmp2
= tcg_const_i32(i
);
7562 gen_helper_set_user_reg(tmp2
, tmp
);
7563 tcg_temp_free_i32(tmp2
);
7564 tcg_temp_free_i32(tmp
);
7565 } else if (i
== rn
) {
7569 store_reg_from_load(env
, s
, i
, tmp
);
7574 /* special case: r15 = PC + 8 */
7575 val
= (long)s
->pc
+ 4;
7576 tmp
= tcg_temp_new_i32();
7577 tcg_gen_movi_i32(tmp
, val
);
7579 tmp
= tcg_temp_new_i32();
7580 tmp2
= tcg_const_i32(i
);
7581 gen_helper_get_user_reg(tmp
, tmp2
);
7582 tcg_temp_free_i32(tmp2
);
7584 tmp
= load_reg(s
, i
);
7586 gen_st32(tmp
, addr
, IS_USER(s
));
7589 /* no need to add after the last transfer */
7591 tcg_gen_addi_i32(addr
, addr
, 4);
7594 if (insn
& (1 << 21)) {
7596 if (insn
& (1 << 23)) {
7597 if (insn
& (1 << 24)) {
7600 /* post increment */
7601 tcg_gen_addi_i32(addr
, addr
, 4);
7604 if (insn
& (1 << 24)) {
7607 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7609 /* post decrement */
7610 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7613 store_reg(s
, rn
, addr
);
7615 tcg_temp_free_i32(addr
);
7618 store_reg(s
, rn
, loaded_var
);
7620 if ((insn
& (1 << 22)) && !user
) {
7621 /* Restore CPSR from SPSR. */
7622 tmp
= load_cpu_field(spsr
);
7623 gen_set_cpsr(tmp
, 0xffffffff);
7624 tcg_temp_free_i32(tmp
);
7625 s
->is_jmp
= DISAS_UPDATE
;
7634 /* branch (and link) */
7635 val
= (int32_t)s
->pc
;
7636 if (insn
& (1 << 24)) {
7637 tmp
= tcg_temp_new_i32();
7638 tcg_gen_movi_i32(tmp
, val
);
7639 store_reg(s
, 14, tmp
);
7641 offset
= (((int32_t)insn
<< 8) >> 8);
7642 val
+= (offset
<< 2) + 4;
7650 if (disas_coproc_insn(env
, s
, insn
))
7655 gen_set_pc_im(s
->pc
);
7656 s
->is_jmp
= DISAS_SWI
;
7660 gen_exception_insn(s
, 4, EXCP_UDEF
);
7666 /* Return true if this is a Thumb-2 logical op. */
7668 thumb2_logic_op(int op
)
7673 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7674 then set condition code flags based on the result of the operation.
7675 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7676 to the high bit of T1.
7677 Returns zero if the opcode is valid. */
7680 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7687 tcg_gen_and_i32(t0
, t0
, t1
);
7691 tcg_gen_andc_i32(t0
, t0
, t1
);
7695 tcg_gen_or_i32(t0
, t0
, t1
);
7699 tcg_gen_orc_i32(t0
, t0
, t1
);
7703 tcg_gen_xor_i32(t0
, t0
, t1
);
7708 gen_helper_add_cc(t0
, t0
, t1
);
7710 tcg_gen_add_i32(t0
, t0
, t1
);
7714 gen_helper_adc_cc(t0
, t0
, t1
);
7720 gen_helper_sbc_cc(t0
, t0
, t1
);
7722 gen_sub_carry(t0
, t0
, t1
);
7726 gen_helper_sub_cc(t0
, t0
, t1
);
7728 tcg_gen_sub_i32(t0
, t0
, t1
);
7732 gen_helper_sub_cc(t0
, t1
, t0
);
7734 tcg_gen_sub_i32(t0
, t1
, t0
);
7736 default: /* 5, 6, 7, 9, 12, 15. */
7742 gen_set_CF_bit31(t1
);
7747 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7749 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7751 uint32_t insn
, imm
, shift
, offset
;
7752 uint32_t rd
, rn
, rm
, rs
;
7763 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7764 || arm_feature (env
, ARM_FEATURE_M
))) {
7765 /* Thumb-1 cores may need to treat bl and blx as a pair of
7766 16-bit instructions to get correct prefetch abort behavior. */
7768 if ((insn
& (1 << 12)) == 0) {
7770 /* Second half of blx. */
7771 offset
= ((insn
& 0x7ff) << 1);
7772 tmp
= load_reg(s
, 14);
7773 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7774 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7776 tmp2
= tcg_temp_new_i32();
7777 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7778 store_reg(s
, 14, tmp2
);
7782 if (insn
& (1 << 11)) {
7783 /* Second half of bl. */
7784 offset
= ((insn
& 0x7ff) << 1) | 1;
7785 tmp
= load_reg(s
, 14);
7786 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7788 tmp2
= tcg_temp_new_i32();
7789 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7790 store_reg(s
, 14, tmp2
);
7794 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
        /* Instruction spans a page boundary. Implement it as two
           16-bit instructions in case the second half causes an
           exception. */
7798 offset
= ((int32_t)insn
<< 21) >> 9;
7799 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7802 /* Fall through to 32-bit decode. */
7805 insn
= lduw_code(s
->pc
);
7807 insn
|= (uint32_t)insn_hw1
<< 16;
7809 if ((insn
& 0xf800e800) != 0xf000e800) {
7813 rn
= (insn
>> 16) & 0xf;
7814 rs
= (insn
>> 12) & 0xf;
7815 rd
= (insn
>> 8) & 0xf;
7817 switch ((insn
>> 25) & 0xf) {
7818 case 0: case 1: case 2: case 3:
7819 /* 16-bit instructions. Should never happen. */
7822 if (insn
& (1 << 22)) {
7823 /* Other load/store, table branch. */
7824 if (insn
& 0x01200000) {
7825 /* Load/store doubleword. */
7827 addr
= tcg_temp_new_i32();
7828 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7830 addr
= load_reg(s
, rn
);
7832 offset
= (insn
& 0xff) * 4;
7833 if ((insn
& (1 << 23)) == 0)
7835 if (insn
& (1 << 24)) {
7836 tcg_gen_addi_i32(addr
, addr
, offset
);
7839 if (insn
& (1 << 20)) {
7841 tmp
= gen_ld32(addr
, IS_USER(s
));
7842 store_reg(s
, rs
, tmp
);
7843 tcg_gen_addi_i32(addr
, addr
, 4);
7844 tmp
= gen_ld32(addr
, IS_USER(s
));
7845 store_reg(s
, rd
, tmp
);
7848 tmp
= load_reg(s
, rs
);
7849 gen_st32(tmp
, addr
, IS_USER(s
));
7850 tcg_gen_addi_i32(addr
, addr
, 4);
7851 tmp
= load_reg(s
, rd
);
7852 gen_st32(tmp
, addr
, IS_USER(s
));
7854 if (insn
& (1 << 21)) {
7855 /* Base writeback. */
7858 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7859 store_reg(s
, rn
, addr
);
7861 tcg_temp_free_i32(addr
);
7863 } else if ((insn
& (1 << 23)) == 0) {
7864 /* Load/store exclusive word. */
7865 addr
= tcg_temp_local_new();
7866 load_reg_var(s
, addr
, rn
);
7867 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7868 if (insn
& (1 << 20)) {
7869 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7871 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7873 tcg_temp_free(addr
);
7874 } else if ((insn
& (1 << 6)) == 0) {
7877 addr
= tcg_temp_new_i32();
7878 tcg_gen_movi_i32(addr
, s
->pc
);
7880 addr
= load_reg(s
, rn
);
7882 tmp
= load_reg(s
, rm
);
7883 tcg_gen_add_i32(addr
, addr
, tmp
);
7884 if (insn
& (1 << 4)) {
7886 tcg_gen_add_i32(addr
, addr
, tmp
);
7887 tcg_temp_free_i32(tmp
);
7888 tmp
= gen_ld16u(addr
, IS_USER(s
));
7890 tcg_temp_free_i32(tmp
);
7891 tmp
= gen_ld8u(addr
, IS_USER(s
));
7893 tcg_temp_free_i32(addr
);
7894 tcg_gen_shli_i32(tmp
, tmp
, 1);
7895 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7896 store_reg(s
, 15, tmp
);
7898 /* Load/store exclusive byte/halfword/doubleword. */
7900 op
= (insn
>> 4) & 0x3;
7904 addr
= tcg_temp_local_new();
7905 load_reg_var(s
, addr
, rn
);
7906 if (insn
& (1 << 20)) {
7907 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7909 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7911 tcg_temp_free(addr
);
7914 /* Load/store multiple, RFE, SRS. */
7915 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7916 /* Not available in user mode. */
7919 if (insn
& (1 << 20)) {
7921 addr
= load_reg(s
, rn
);
7922 if ((insn
& (1 << 24)) == 0)
7923 tcg_gen_addi_i32(addr
, addr
, -8);
7924 /* Load PC into tmp and CPSR into tmp2. */
7925 tmp
= gen_ld32(addr
, 0);
7926 tcg_gen_addi_i32(addr
, addr
, 4);
7927 tmp2
= gen_ld32(addr
, 0);
7928 if (insn
& (1 << 21)) {
7929 /* Base writeback. */
7930 if (insn
& (1 << 24)) {
7931 tcg_gen_addi_i32(addr
, addr
, 4);
7933 tcg_gen_addi_i32(addr
, addr
, -4);
7935 store_reg(s
, rn
, addr
);
7937 tcg_temp_free_i32(addr
);
7939 gen_rfe(s
, tmp
, tmp2
);
7943 addr
= tcg_temp_new_i32();
7944 tmp
= tcg_const_i32(op
);
7945 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7946 tcg_temp_free_i32(tmp
);
7947 if ((insn
& (1 << 24)) == 0) {
7948 tcg_gen_addi_i32(addr
, addr
, -8);
7950 tmp
= load_reg(s
, 14);
7951 gen_st32(tmp
, addr
, 0);
7952 tcg_gen_addi_i32(addr
, addr
, 4);
7953 tmp
= tcg_temp_new_i32();
7954 gen_helper_cpsr_read(tmp
);
7955 gen_st32(tmp
, addr
, 0);
7956 if (insn
& (1 << 21)) {
7957 if ((insn
& (1 << 24)) == 0) {
7958 tcg_gen_addi_i32(addr
, addr
, -4);
7960 tcg_gen_addi_i32(addr
, addr
, 4);
7962 tmp
= tcg_const_i32(op
);
7963 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7964 tcg_temp_free_i32(tmp
);
7966 tcg_temp_free_i32(addr
);
7971 /* Load/store multiple. */
7972 addr
= load_reg(s
, rn
);
7974 for (i
= 0; i
< 16; i
++) {
7975 if (insn
& (1 << i
))
7978 if (insn
& (1 << 24)) {
7979 tcg_gen_addi_i32(addr
, addr
, -offset
);
7982 for (i
= 0; i
< 16; i
++) {
7983 if ((insn
& (1 << i
)) == 0)
7985 if (insn
& (1 << 20)) {
7987 tmp
= gen_ld32(addr
, IS_USER(s
));
7991 store_reg(s
, i
, tmp
);
7995 tmp
= load_reg(s
, i
);
7996 gen_st32(tmp
, addr
, IS_USER(s
));
7998 tcg_gen_addi_i32(addr
, addr
, 4);
8000 if (insn
& (1 << 21)) {
8001 /* Base register writeback. */
8002 if (insn
& (1 << 24)) {
8003 tcg_gen_addi_i32(addr
, addr
, -offset
);
8005 /* Fault if writeback register is in register list. */
8006 if (insn
& (1 << rn
))
8008 store_reg(s
, rn
, addr
);
8010 tcg_temp_free_i32(addr
);
8017 op
= (insn
>> 21) & 0xf;
8019 /* Halfword pack. */
8020 tmp
= load_reg(s
, rn
);
8021 tmp2
= load_reg(s
, rm
);
8022 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
8023 if (insn
& (1 << 5)) {
8027 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8028 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8029 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8033 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8034 tcg_gen_ext16u_i32(tmp
, tmp
);
8035 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8037 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8038 tcg_temp_free_i32(tmp2
);
8039 store_reg(s
, rd
, tmp
);
8041 /* Data processing register constant shift. */
8043 tmp
= tcg_temp_new_i32();
8044 tcg_gen_movi_i32(tmp
, 0);
8046 tmp
= load_reg(s
, rn
);
8048 tmp2
= load_reg(s
, rm
);
8050 shiftop
= (insn
>> 4) & 3;
8051 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8052 conds
= (insn
& (1 << 20)) != 0;
8053 logic_cc
= (conds
&& thumb2_logic_op(op
));
8054 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8055 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
8057 tcg_temp_free_i32(tmp2
);
8059 store_reg(s
, rd
, tmp
);
8061 tcg_temp_free_i32(tmp
);
        case 13: /* Misc data processing.  */
            op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
            if (op < 4 && (insn & 0xf000) != 0xf000)
                goto illegal_op;
            switch (op) {
            case 0: /* Register controlled shift.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if ((insn & 0x70) != 0)
                    goto illegal_op;
                op = (insn >> 21) & 3;
                logic_cc = (insn & (1 << 20)) != 0;
                gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
                if (logic_cc)
                    gen_logic_CC(tmp);
                store_reg_bx(env, s, rd, tmp);
                break;
            case 1: /* Sign/zero extend.  */
                tmp = load_reg(s, rm);
                shift = (insn >> 4) & 3;
                /* ??? In many cases it's not necessary to do a
                   rotate, a shift is sufficient.  */
                if (shift != 0)
                    tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                op = (insn >> 20) & 7;
                switch (op) {
                case 0: gen_sxth(tmp);   break;
                case 1: gen_uxth(tmp);   break;
                case 2: gen_sxtb16(tmp); break;
                case 3: gen_uxtb16(tmp); break;
                case 4: gen_sxtb(tmp);   break;
                case 5: gen_uxtb(tmp);   break;
                default: goto illegal_op;
                }
                if (rn != 15) {
                    tmp2 = load_reg(s, rn);
                    if ((op >> 1) == 1) {
                        gen_add16(tmp, tmp2);
                    } else {
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    }
                }
                store_reg(s, rd, tmp);
                break;
            case 2: /* SIMD add/subtract.  */
                op = (insn >> 20) & 7;
                shift = (insn >> 4) & 7;
                if ((op & 3) == 3 || (shift & 3) == 3)
                    goto illegal_op;
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* Other data processing.  */
                op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
                if (op < 4) {
                    /* Saturating add/subtract.  */
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    if (op & 1)
                        gen_helper_double_saturate(tmp, tmp);
                    if (op & 2)
                        gen_helper_sub_saturate(tmp, tmp2, tmp);
                    else
                        gen_helper_add_saturate(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, rn);
                    switch (op) {
                    case 0x0a: /* rbit */
                        gen_helper_rbit(tmp, tmp);
                        break;
                    case 0x08: /* rev */
                        tcg_gen_bswap32_i32(tmp, tmp);
                        break;
                    case 0x09: /* rev16 */
                        gen_rev16(tmp);
                        break;
                    case 0x0b: /* revsh */
                        gen_revsh(tmp);
                        break;
                    case 0x10: /* sel */
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        break;
                    case 0x18: /* clz */
                        gen_helper_clz(tmp, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                }
                store_reg(s, rd, tmp);
                break;
8166 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8167 op
= (insn
>> 4) & 0xf;
8168 tmp
= load_reg(s
, rn
);
8169 tmp2
= load_reg(s
, rm
);
8170 switch ((insn
>> 20) & 7) {
8171 case 0: /* 32 x 32 -> 32 */
8172 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8173 tcg_temp_free_i32(tmp2
);
8175 tmp2
= load_reg(s
, rs
);
8177 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8179 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8180 tcg_temp_free_i32(tmp2
);
8183 case 1: /* 16 x 16 -> 32 */
8184 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8185 tcg_temp_free_i32(tmp2
);
8187 tmp2
= load_reg(s
, rs
);
8188 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8189 tcg_temp_free_i32(tmp2
);
8192 case 2: /* Dual multiply add. */
8193 case 4: /* Dual multiply subtract. */
8195 gen_swap_half(tmp2
);
8196 gen_smul_dual(tmp
, tmp2
);
8197 if (insn
& (1 << 22)) {
8198 /* This subtraction cannot overflow. */
8199 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8201 /* This addition cannot overflow 32 bits;
8202 * however it may overflow considered as a signed
8203 * operation, in which case we must set the Q flag.
8205 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8207 tcg_temp_free_i32(tmp2
);
8210 tmp2
= load_reg(s
, rs
);
8211 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8212 tcg_temp_free_i32(tmp2
);
8215 case 3: /* 32 * 16 -> 32msb */
8217 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8220 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8221 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8222 tmp
= tcg_temp_new_i32();
8223 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8224 tcg_temp_free_i64(tmp64
);
8227 tmp2
= load_reg(s
, rs
);
8228 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8229 tcg_temp_free_i32(tmp2
);
8232 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8233 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8235 tmp
= load_reg(s
, rs
);
8236 if (insn
& (1 << 20)) {
8237 tmp64
= gen_addq_msw(tmp64
, tmp
);
8239 tmp64
= gen_subq_msw(tmp64
, tmp
);
8242 if (insn
& (1 << 4)) {
8243 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8245 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8246 tmp
= tcg_temp_new_i32();
8247 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8248 tcg_temp_free_i64(tmp64
);
8250 case 7: /* Unsigned sum of absolute differences. */
8251 gen_helper_usad8(tmp
, tmp
, tmp2
);
8252 tcg_temp_free_i32(tmp2
);
8254 tmp2
= load_reg(s
, rs
);
8255 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8256 tcg_temp_free_i32(tmp2
);
8260 store_reg(s
, rd
, tmp
);
8262 case 6: case 7: /* 64-bit multiply, Divide. */
8263 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
8264 tmp
= load_reg(s
, rn
);
8265 tmp2
= load_reg(s
, rm
);
8266 if ((op
& 0x50) == 0x10) {
8268 if (!arm_feature(env
, ARM_FEATURE_DIV
))
8271 gen_helper_udiv(tmp
, tmp
, tmp2
);
8273 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8274 tcg_temp_free_i32(tmp2
);
8275 store_reg(s
, rd
, tmp
);
8276 } else if ((op
& 0xe) == 0xc) {
8277 /* Dual multiply accumulate long. */
8279 gen_swap_half(tmp2
);
8280 gen_smul_dual(tmp
, tmp2
);
8282 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8284 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8286 tcg_temp_free_i32(tmp2
);
8288 tmp64
= tcg_temp_new_i64();
8289 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8290 tcg_temp_free_i32(tmp
);
8291 gen_addq(s
, tmp64
, rs
, rd
);
8292 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8293 tcg_temp_free_i64(tmp64
);
8296 /* Unsigned 64-bit multiply */
8297 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8301 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8302 tcg_temp_free_i32(tmp2
);
8303 tmp64
= tcg_temp_new_i64();
8304 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8305 tcg_temp_free_i32(tmp
);
8307 /* Signed 64-bit multiply */
8308 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8313 gen_addq_lo(s
, tmp64
, rs
);
8314 gen_addq_lo(s
, tmp64
, rd
);
8315 } else if (op
& 0x40) {
8316 /* 64-bit accumulate. */
8317 gen_addq(s
, tmp64
, rs
, rd
);
8319 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8320 tcg_temp_free_i64(tmp64
);
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
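                /* Illustrative example (not part of the original source):
                 * with S=0, J1=J2=1, imm10=0x001 and imm11=0x000 the code
                 * above yields offset = 0x1000, i.e. a branch 4096 bytes past
                 * s->pc (the instruction address + 4).  The two XORs realise
                 * I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S) in offset[23:22].
                 */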
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
8369 } else if (((insn
>> 23) & 7) == 7) {
8371 if (insn
& (1 << 13))
8374 if (insn
& (1 << 26)) {
8375 /* Secure monitor call (v6Z) */
8376 goto illegal_op
; /* not implemented. */
8378 op
= (insn
>> 20) & 7;
8380 case 0: /* msr cpsr. */
8382 tmp
= load_reg(s
, rn
);
8383 addr
= tcg_const_i32(insn
& 0xff);
8384 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8385 tcg_temp_free_i32(addr
);
8386 tcg_temp_free_i32(tmp
);
8391 case 1: /* msr spsr. */
8394 tmp
= load_reg(s
, rn
);
8396 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8400 case 2: /* cps, nop-hint. */
8401 if (((insn
>> 8) & 7) == 0) {
8402 gen_nop_hint(s
, insn
& 0xff);
8404 /* Implemented as NOP in user mode. */
8409 if (insn
& (1 << 10)) {
8410 if (insn
& (1 << 7))
8412 if (insn
& (1 << 6))
8414 if (insn
& (1 << 5))
8416 if (insn
& (1 << 9))
8417 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8419 if (insn
& (1 << 8)) {
8421 imm
|= (insn
& 0x1f);
8424 gen_set_psr_im(s
, offset
, 0, imm
);
8427 case 3: /* Special control operations. */
8429 op
= (insn
>> 4) & 0xf;
8437 /* These execute as NOPs. */
8444 /* Trivial implementation equivalent to bx. */
8445 tmp
= load_reg(s
, rn
);
8448 case 5: /* Exception return. */
8452 if (rn
!= 14 || rd
!= 15) {
8455 tmp
= load_reg(s
, rn
);
8456 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
8457 gen_exception_return(s
, tmp
);
8459 case 6: /* mrs cpsr. */
8460 tmp
= tcg_temp_new_i32();
8462 addr
= tcg_const_i32(insn
& 0xff);
8463 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
8464 tcg_temp_free_i32(addr
);
8466 gen_helper_cpsr_read(tmp
);
8468 store_reg(s
, rd
, tmp
);
8470 case 7: /* mrs spsr. */
8471 /* Not accessible in user mode. */
8472 if (IS_USER(s
) || IS_M(env
))
8474 tmp
= load_cpu_field(spsr
);
8475 store_reg(s
, rd
, tmp
);
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
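                /* Illustrative note (not part of the original source): this
                 * is the conditional B<c>.W form, whose offset is only
                 * S:J2:J1:imm6:imm11:'0' (about +/-1MB), so J1/J2 are copied
                 * straight into offset[18]/offset[19] rather than being XORed
                 * with the sign bit as in the unconditional encoding above.
                 */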
8502 /* Data processing immediate. */
8503 if (insn
& (1 << 25)) {
8504 if (insn
& (1 << 24)) {
8505 if (insn
& (1 << 20))
8507 /* Bitfield/Saturate. */
8508 op
= (insn
>> 21) & 7;
8510 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8512 tmp
= tcg_temp_new_i32();
8513 tcg_gen_movi_i32(tmp
, 0);
8515 tmp
= load_reg(s
, rn
);
8518 case 2: /* Signed bitfield extract. */
8520 if (shift
+ imm
> 32)
8523 gen_sbfx(tmp
, shift
, imm
);
8525 case 6: /* Unsigned bitfield extract. */
8527 if (shift
+ imm
> 32)
8530 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8532 case 3: /* Bitfield insert/clear. */
8535 imm
= imm
+ 1 - shift
;
8537 tmp2
= load_reg(s
, rd
);
8538 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
8539 tcg_temp_free_i32(tmp2
);
8544 default: /* Saturate. */
8547 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8549 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8551 tmp2
= tcg_const_i32(imm
);
8554 if ((op
& 1) && shift
== 0)
8555 gen_helper_usat16(tmp
, tmp
, tmp2
);
8557 gen_helper_usat(tmp
, tmp
, tmp2
);
8560 if ((op
& 1) && shift
== 0)
8561 gen_helper_ssat16(tmp
, tmp
, tmp2
);
8563 gen_helper_ssat(tmp
, tmp
, tmp2
);
8565 tcg_temp_free_i32(tmp2
);
8568 store_reg(s
, rd
, tmp
);
8570 imm
= ((insn
& 0x04000000) >> 15)
8571 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8572 if (insn
& (1 << 22)) {
8573 /* 16-bit immediate. */
8574 imm
|= (insn
>> 4) & 0xf000;
8575 if (insn
& (1 << 23)) {
8577 tmp
= load_reg(s
, rd
);
8578 tcg_gen_ext16u_i32(tmp
, tmp
);
8579 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8582 tmp
= tcg_temp_new_i32();
8583 tcg_gen_movi_i32(tmp
, imm
);
8586 /* Add/sub 12-bit immediate. */
8588 offset
= s
->pc
& ~(uint32_t)3;
8589 if (insn
& (1 << 23))
8593 tmp
= tcg_temp_new_i32();
8594 tcg_gen_movi_i32(tmp
, offset
);
8596 tmp
= load_reg(s
, rn
);
8597 if (insn
& (1 << 23))
8598 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8600 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8603 store_reg(s
, rd
, tmp
);
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
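                /* Illustrative note (not part of the original source): the
                 * four low "shift" values replicate the 8-bit constant XY as
                 * 000000XY, 00XY00XY, XY00XY00 or XYXYXYXY; any other value
                 * selects the rotated form 1bcdefgh ROR shift, which is why
                 * bit 7 is forced on before the rotation performed above.
                 */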
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE or unallocated hint */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD* or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD* or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
8703 addr
= tcg_temp_new_i32();
8705 /* s->pc has already been incremented by 4. */
8706 imm
= s
->pc
& 0xfffffffc;
8707 if (insn
& (1 << 23))
8708 imm
+= insn
& 0xfff;
8710 imm
-= insn
& 0xfff;
8711 tcg_gen_movi_i32(addr
, imm
);
8713 addr
= load_reg(s
, rn
);
8714 if (insn
& (1 << 23)) {
8715 /* Positive offset. */
8717 tcg_gen_addi_i32(addr
, addr
, imm
);
8720 switch ((insn
>> 8) & 0xf) {
8721 case 0x0: /* Shifted Register. */
8722 shift
= (insn
>> 4) & 0xf;
8724 tcg_temp_free_i32(addr
);
8727 tmp
= load_reg(s
, rm
);
8729 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8730 tcg_gen_add_i32(addr
, addr
, tmp
);
8731 tcg_temp_free_i32(tmp
);
8733 case 0xc: /* Negative offset. */
8734 tcg_gen_addi_i32(addr
, addr
, -imm
);
8736 case 0xe: /* User privilege. */
8737 tcg_gen_addi_i32(addr
, addr
, imm
);
8740 case 0x9: /* Post-decrement. */
8743 case 0xb: /* Post-increment. */
8747 case 0xd: /* Pre-decrement. */
8750 case 0xf: /* Pre-increment. */
8751 tcg_gen_addi_i32(addr
, addr
, imm
);
8755 tcg_temp_free_i32(addr
);
8760 if (insn
& (1 << 20)) {
8763 case 0: tmp
= gen_ld8u(addr
, user
); break;
8764 case 4: tmp
= gen_ld8s(addr
, user
); break;
8765 case 1: tmp
= gen_ld16u(addr
, user
); break;
8766 case 5: tmp
= gen_ld16s(addr
, user
); break;
8767 case 2: tmp
= gen_ld32(addr
, user
); break;
8769 tcg_temp_free_i32(addr
);
8775 store_reg(s
, rs
, tmp
);
8779 tmp
= load_reg(s
, rs
);
8781 case 0: gen_st8(tmp
, addr
, user
); break;
8782 case 1: gen_st16(tmp
, addr
, user
); break;
8783 case 2: gen_st32(tmp
, addr
, user
); break;
8785 tcg_temp_free_i32(addr
);
8790 tcg_gen_addi_i32(addr
, addr
, imm
);
8792 store_reg(s
, rn
, addr
);
8794 tcg_temp_free_i32(addr
);
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
8834 rn
= (insn
>> 3) & 7;
8835 tmp
= load_reg(s
, rn
);
8836 if (insn
& (1 << 10)) {
8838 tmp2
= tcg_temp_new_i32();
8839 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
8842 rm
= (insn
>> 6) & 7;
8843 tmp2
= load_reg(s
, rm
);
8845 if (insn
& (1 << 9)) {
8846 if (s
->condexec_mask
)
8847 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8849 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8851 if (s
->condexec_mask
)
8852 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8854 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8856 tcg_temp_free_i32(tmp2
);
8857 store_reg(s
, rd
, tmp
);
8859 /* shift immediate */
8860 rm
= (insn
>> 3) & 7;
8861 shift
= (insn
>> 6) & 0x1f;
8862 tmp
= load_reg(s
, rm
);
8863 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
8864 if (!s
->condexec_mask
)
8866 store_reg(s
, rd
, tmp
);
8870 /* arithmetic large immediate */
8871 op
= (insn
>> 11) & 3;
8872 rd
= (insn
>> 8) & 0x7;
8873 if (op
== 0) { /* mov */
8874 tmp
= tcg_temp_new_i32();
8875 tcg_gen_movi_i32(tmp
, insn
& 0xff);
8876 if (!s
->condexec_mask
)
8878 store_reg(s
, rd
, tmp
);
8880 tmp
= load_reg(s
, rd
);
8881 tmp2
= tcg_temp_new_i32();
8882 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
8885 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8886 tcg_temp_free_i32(tmp
);
8887 tcg_temp_free_i32(tmp2
);
8890 if (s
->condexec_mask
)
8891 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8893 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8894 tcg_temp_free_i32(tmp2
);
8895 store_reg(s
, rd
, tmp
);
8898 if (s
->condexec_mask
)
8899 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8901 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8902 tcg_temp_free_i32(tmp2
);
8903 store_reg(s
, rd
, tmp
);
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
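            /* Illustrative note (not part of the original source): this forms
             * the Thumb LDR (literal) address Align(PC, 4) + imm8 * 4.  s->pc
             * already points 2 bytes past this 16-bit insn, so the extra +2
             * gives the architectural PC (instruction address + 4) before the
             * word alignment is applied by the mask above.
             */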
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
8921 if (insn
& (1 << 10)) {
8922 /* data processing extended or blx */
8923 rd
= (insn
& 7) | ((insn
>> 4) & 8);
8924 rm
= (insn
>> 3) & 0xf;
8925 op
= (insn
>> 8) & 3;
8928 tmp
= load_reg(s
, rd
);
8929 tmp2
= load_reg(s
, rm
);
8930 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8931 tcg_temp_free_i32(tmp2
);
8932 store_reg(s
, rd
, tmp
);
8935 tmp
= load_reg(s
, rd
);
8936 tmp2
= load_reg(s
, rm
);
8937 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8938 tcg_temp_free_i32(tmp2
);
8939 tcg_temp_free_i32(tmp
);
8941 case 2: /* mov/cpy */
8942 tmp
= load_reg(s
, rm
);
8943 store_reg(s
, rd
, tmp
);
8945 case 3:/* branch [and link] exchange thumb register */
8946 tmp
= load_reg(s
, rm
);
8947 if (insn
& (1 << 7)) {
8949 val
= (uint32_t)s
->pc
| 1;
8950 tmp2
= tcg_temp_new_i32();
8951 tcg_gen_movi_i32(tmp2
, val
);
8952 store_reg(s
, 14, tmp2
);
8954 /* already thumb, no need to check */
8961 /* data processing register */
8963 rm
= (insn
>> 3) & 7;
8964 op
= (insn
>> 6) & 0xf;
8965 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
8966 /* the shift/rotate ops want the operands backwards */
8975 if (op
== 9) { /* neg */
8976 tmp
= tcg_temp_new_i32();
8977 tcg_gen_movi_i32(tmp
, 0);
8978 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
8979 tmp
= load_reg(s
, rd
);
8984 tmp2
= load_reg(s
, rm
);
8987 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8988 if (!s
->condexec_mask
)
8992 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8993 if (!s
->condexec_mask
)
8997 if (s
->condexec_mask
) {
8998 gen_helper_shl(tmp2
, tmp2
, tmp
);
9000 gen_helper_shl_cc(tmp2
, tmp2
, tmp
);
9005 if (s
->condexec_mask
) {
9006 gen_helper_shr(tmp2
, tmp2
, tmp
);
9008 gen_helper_shr_cc(tmp2
, tmp2
, tmp
);
9013 if (s
->condexec_mask
) {
9014 gen_helper_sar(tmp2
, tmp2
, tmp
);
9016 gen_helper_sar_cc(tmp2
, tmp2
, tmp
);
9021 if (s
->condexec_mask
)
9024 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
9027 if (s
->condexec_mask
)
9028 gen_sub_carry(tmp
, tmp
, tmp2
);
9030 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
9033 if (s
->condexec_mask
) {
9034 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
9035 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
9037 gen_helper_ror_cc(tmp2
, tmp2
, tmp
);
9042 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9047 if (s
->condexec_mask
)
9048 tcg_gen_neg_i32(tmp
, tmp2
);
9050 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9053 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9057 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9061 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9062 if (!s
->condexec_mask
)
9066 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9067 if (!s
->condexec_mask
)
9071 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
9072 if (!s
->condexec_mask
)
9076 tcg_gen_not_i32(tmp2
, tmp2
);
9077 if (!s
->condexec_mask
)
9085 store_reg(s
, rm
, tmp2
);
9087 tcg_temp_free_i32(tmp
);
9089 store_reg(s
, rd
, tmp
);
9090 tcg_temp_free_i32(tmp2
);
9093 tcg_temp_free_i32(tmp
);
9094 tcg_temp_free_i32(tmp2
);
9099 /* load/store register offset. */
9101 rn
= (insn
>> 3) & 7;
9102 rm
= (insn
>> 6) & 7;
9103 op
= (insn
>> 9) & 7;
9104 addr
= load_reg(s
, rn
);
9105 tmp
= load_reg(s
, rm
);
9106 tcg_gen_add_i32(addr
, addr
, tmp
);
9107 tcg_temp_free_i32(tmp
);
9109 if (op
< 3) /* store */
9110 tmp
= load_reg(s
, rd
);
9114 gen_st32(tmp
, addr
, IS_USER(s
));
9117 gen_st16(tmp
, addr
, IS_USER(s
));
9120 gen_st8(tmp
, addr
, IS_USER(s
));
9123 tmp
= gen_ld8s(addr
, IS_USER(s
));
9126 tmp
= gen_ld32(addr
, IS_USER(s
));
9129 tmp
= gen_ld16u(addr
, IS_USER(s
));
9132 tmp
= gen_ld8u(addr
, IS_USER(s
));
9135 tmp
= gen_ld16s(addr
, IS_USER(s
));
9138 if (op
>= 3) /* load */
9139 store_reg(s
, rd
, tmp
);
9140 tcg_temp_free_i32(addr
);
9144 /* load/store word immediate offset */
9146 rn
= (insn
>> 3) & 7;
9147 addr
= load_reg(s
, rn
);
9148 val
= (insn
>> 4) & 0x7c;
9149 tcg_gen_addi_i32(addr
, addr
, val
);
9151 if (insn
& (1 << 11)) {
9153 tmp
= gen_ld32(addr
, IS_USER(s
));
9154 store_reg(s
, rd
, tmp
);
9157 tmp
= load_reg(s
, rd
);
9158 gen_st32(tmp
, addr
, IS_USER(s
));
9160 tcg_temp_free_i32(addr
);
9164 /* load/store byte immediate offset */
9166 rn
= (insn
>> 3) & 7;
9167 addr
= load_reg(s
, rn
);
9168 val
= (insn
>> 6) & 0x1f;
9169 tcg_gen_addi_i32(addr
, addr
, val
);
9171 if (insn
& (1 << 11)) {
9173 tmp
= gen_ld8u(addr
, IS_USER(s
));
9174 store_reg(s
, rd
, tmp
);
9177 tmp
= load_reg(s
, rd
);
9178 gen_st8(tmp
, addr
, IS_USER(s
));
9180 tcg_temp_free_i32(addr
);
9184 /* load/store halfword immediate offset */
9186 rn
= (insn
>> 3) & 7;
9187 addr
= load_reg(s
, rn
);
9188 val
= (insn
>> 5) & 0x3e;
9189 tcg_gen_addi_i32(addr
, addr
, val
);
9191 if (insn
& (1 << 11)) {
9193 tmp
= gen_ld16u(addr
, IS_USER(s
));
9194 store_reg(s
, rd
, tmp
);
9197 tmp
= load_reg(s
, rd
);
9198 gen_st16(tmp
, addr
, IS_USER(s
));
9200 tcg_temp_free_i32(addr
);
9204 /* load/store from stack */
9205 rd
= (insn
>> 8) & 7;
9206 addr
= load_reg(s
, 13);
9207 val
= (insn
& 0xff) * 4;
9208 tcg_gen_addi_i32(addr
, addr
, val
);
9210 if (insn
& (1 << 11)) {
9212 tmp
= gen_ld32(addr
, IS_USER(s
));
9213 store_reg(s
, rd
, tmp
);
9216 tmp
= load_reg(s
, rd
);
9217 gen_st32(tmp
, addr
, IS_USER(s
));
9219 tcg_temp_free_i32(addr
);
9223 /* add to high reg */
9224 rd
= (insn
>> 8) & 7;
9225 if (insn
& (1 << 11)) {
9227 tmp
= load_reg(s
, 13);
9229 /* PC. bit 1 is ignored. */
9230 tmp
= tcg_temp_new_i32();
9231 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
9233 val
= (insn
& 0xff) * 4;
9234 tcg_gen_addi_i32(tmp
, tmp
, val
);
9235 store_reg(s
, rd
, tmp
);
9240 op
= (insn
>> 8) & 0xf;
9243 /* adjust stack pointer */
9244 tmp
= load_reg(s
, 13);
9245 val
= (insn
& 0x7f) * 4;
9246 if (insn
& (1 << 7))
9247 val
= -(int32_t)val
;
9248 tcg_gen_addi_i32(tmp
, tmp
, val
);
9249 store_reg(s
, 13, tmp
);
9252 case 2: /* sign/zero extend. */
9255 rm
= (insn
>> 3) & 7;
9256 tmp
= load_reg(s
, rm
);
9257 switch ((insn
>> 6) & 3) {
9258 case 0: gen_sxth(tmp
); break;
9259 case 1: gen_sxtb(tmp
); break;
9260 case 2: gen_uxth(tmp
); break;
9261 case 3: gen_uxtb(tmp
); break;
9263 store_reg(s
, rd
, tmp
);
9265 case 4: case 5: case 0xc: case 0xd:
9267 addr
= load_reg(s
, 13);
9268 if (insn
& (1 << 8))
9272 for (i
= 0; i
< 8; i
++) {
9273 if (insn
& (1 << i
))
9276 if ((insn
& (1 << 11)) == 0) {
9277 tcg_gen_addi_i32(addr
, addr
, -offset
);
9279 for (i
= 0; i
< 8; i
++) {
9280 if (insn
& (1 << i
)) {
9281 if (insn
& (1 << 11)) {
9283 tmp
= gen_ld32(addr
, IS_USER(s
));
9284 store_reg(s
, i
, tmp
);
9287 tmp
= load_reg(s
, i
);
9288 gen_st32(tmp
, addr
, IS_USER(s
));
9290 /* advance to the next address. */
9291 tcg_gen_addi_i32(addr
, addr
, 4);
9295 if (insn
& (1 << 8)) {
9296 if (insn
& (1 << 11)) {
9298 tmp
= gen_ld32(addr
, IS_USER(s
));
9299 /* don't set the pc until the rest of the instruction
9303 tmp
= load_reg(s
, 14);
9304 gen_st32(tmp
, addr
, IS_USER(s
));
9306 tcg_gen_addi_i32(addr
, addr
, 4);
9308 if ((insn
& (1 << 11)) == 0) {
9309 tcg_gen_addi_i32(addr
, addr
, -offset
);
9311 /* write back the new stack pointer */
9312 store_reg(s
, 13, addr
);
9313 /* set the new PC value */
9314 if ((insn
& 0x0900) == 0x0900) {
9315 store_reg_from_load(env
, s
, 15, tmp
);
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
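            /* Illustrative note (not part of the original source): CBZ/CBNZ
             * encode a forward-only offset i:imm5:'0' (0..126 bytes),
             * assembled here from insn[9] and insn[7:3]; it is later added to
             * val, which holds the instruction address + 4.
             */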
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
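            /* Illustrative note (not part of the original source):
             * condexec_cond keeps the upper bits of firstcond, while
             * condexec_mask holds firstcond[0] together with the IT mask.
             * After each insn in the block the translation loop shifts the
             * mask left and copies its top bit into the low bit of
             * condexec_cond, so no per-insn CPUState update is needed here.
             */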
        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;
9353 rn
= (insn
>> 3) & 0x7;
9355 tmp
= load_reg(s
, rn
);
9356 switch ((insn
>> 6) & 3) {
9357 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
9358 case 1: gen_rev16(tmp
); break;
9359 case 3: gen_revsh(tmp
); break;
9360 default: goto illegal_op
;
9362 store_reg(s
, rd
, tmp
);
9370 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
9373 addr
= tcg_const_i32(16);
9374 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9375 tcg_temp_free_i32(addr
);
9379 addr
= tcg_const_i32(17);
9380 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9381 tcg_temp_free_i32(addr
);
9383 tcg_temp_free_i32(tmp
);
9386 if (insn
& (1 << 4))
9387 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
9390 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
9400 /* load/store multiple */
9401 rn
= (insn
>> 8) & 0x7;
9402 addr
= load_reg(s
, rn
);
9403 for (i
= 0; i
< 8; i
++) {
9404 if (insn
& (1 << i
)) {
9405 if (insn
& (1 << 11)) {
9407 tmp
= gen_ld32(addr
, IS_USER(s
));
9408 store_reg(s
, i
, tmp
);
9411 tmp
= load_reg(s
, i
);
9412 gen_st32(tmp
, addr
, IS_USER(s
));
9414 /* advance to the next address */
9415 tcg_gen_addi_i32(addr
, addr
, 4);
9418 /* Base register writeback. */
9419 if ((insn
& (1 << rn
)) == 0) {
9420 store_reg(s
, rn
, addr
);
9422 tcg_temp_free_i32(addr
);
9427 /* conditional branch or swi */
9428 cond
= (insn
>> 8) & 0xf;
9434 gen_set_pc_im(s
->pc
);
9435 s
->is_jmp
= DISAS_SWI
;
9438 /* generate a conditional jump to next instruction */
9439 s
->condlabel
= gen_new_label();
9440 gen_test_cc(cond
^ 1, s
->condlabel
);
9443 /* jump to the offset */
9444 val
= (uint32_t)s
->pc
+ 2;
9445 offset
= ((int32_t)insn
<< 24) >> 24;
9451 if (insn
& (1 << 11)) {
9452 if (disas_thumb2_insn(env
, s
, insn
))
9456 /* unconditional branch */
9457 val
= (uint32_t)s
->pc
;
9458 offset
= ((int32_t)insn
<< 21) >> 21;
9459 val
+= (offset
<< 1) + 2;
9464 if (disas_thumb2_insn(env
, s
, insn
))
9470 gen_exception_insn(s
, 4, EXCP_UDEF
);
9474 gen_exception_insn(s
, 2, EXCP_UDEF
);
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[].  gen_pc_load[] then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
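    /* Illustrative note (not part of the original source): from this point
     * the in-memory condexec_bits are zero; the live IT state is kept only
     * in dc->condexec_mask/cond and is written back by gen_set_condexec()
     * (or recovered from gen_opc_condexec_bits[]) whenever the TB is left.
     */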
9572 #ifdef CONFIG_USER_ONLY
9573 /* Intercept jump to the magic kernel page. */
9574 if (dc
->pc
>= 0xffff0000) {
9575 /* We always get here via a jump, so know we are not in a
9576 conditional execution block. */
9577 gen_exception(EXCP_KERNEL_TRAP
);
9578 dc
->is_jmp
= DISAS_UPDATE
;
9582 if (dc
->pc
>= 0xfffffff0 && IS_M(env
)) {
9583 /* We always get here via a jump, so know we are not in a
9584 conditional execution block. */
9585 gen_exception(EXCP_EXCEPTION_EXIT
);
9586 dc
->is_jmp
= DISAS_UPDATE
;
9591 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
9592 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
9593 if (bp
->pc
== dc
->pc
) {
9594 gen_exception_insn(dc
, 0, EXCP_DEBUG
);
9595 /* Advance PC so that clearing the breakpoint will
9596 invalidate this TB. */
9598 goto done_generating
;
9604 j
= gen_opc_ptr
- gen_opc_buf
;
9608 gen_opc_instr_start
[lj
++] = 0;
9610 gen_opc_pc
[lj
] = dc
->pc
;
9611 gen_opc_condexec_bits
[lj
] = (dc
->condexec_cond
<< 4) | (dc
->condexec_mask
>> 1);
9612 gen_opc_instr_start
[lj
] = 1;
9613 gen_opc_icount
[lj
] = num_insns
;
9616 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
9619 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
9620 tcg_gen_debug_insn_start(dc
->pc
);
9624 disas_thumb_insn(env
, dc
);
9625 if (dc
->condexec_mask
) {
9626 dc
->condexec_cond
= (dc
->condexec_cond
& 0xe)
9627 | ((dc
->condexec_mask
>> 4) & 1);
9628 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
9629 if (dc
->condexec_mask
== 0) {
9630 dc
->condexec_cond
= 0;
9634 disas_arm_insn(env
, dc
);
9637 if (dc
->condjmp
&& !dc
->is_jmp
) {
9638 gen_set_label(dc
->condlabel
);
9642 if (tcg_check_temp_count()) {
9643 fprintf(stderr
, "TCG temporary leak before %08x\n", dc
->pc
);
9646 /* Translation stops when a conditional branch is encountered.
9647 * Otherwise the subsequent code could get translated several times.
9648 * Also stop translation when a page boundary is reached. This
9649 * ensures prefetch aborts occur at the right place. */
9651 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
9652 !env
->singlestep_enabled
&&
9654 dc
->pc
< next_page_start
&&
9655 num_insns
< max_insns
);
9657 if (tb
->cflags
& CF_LAST_IO
) {
9659 /* FIXME: This can theoretically happen with self-modifying
9661 cpu_abort(env
, "IO on conditional branch instruction");
9666 /* At this stage dc->condjmp will only be set when the skipped
9667 instruction was a conditional branch or trap, and the PC has
9668 already been written. */
9669 if (unlikely(env
->singlestep_enabled
)) {
9670 /* Make sure the pc is updated, and raise a debug exception. */
9672 gen_set_condexec(dc
);
9673 if (dc
->is_jmp
== DISAS_SWI
) {
9674 gen_exception(EXCP_SWI
);
9676 gen_exception(EXCP_DEBUG
);
9678 gen_set_label(dc
->condlabel
);
9680 if (dc
->condjmp
|| !dc
->is_jmp
) {
9681 gen_set_pc_im(dc
->pc
);
9684 gen_set_condexec(dc
);
9685 if (dc
->is_jmp
== DISAS_SWI
&& !dc
->condjmp
) {
9686 gen_exception(EXCP_SWI
);
9688 /* FIXME: Single stepping a WFI insn will not halt
9690 gen_exception(EXCP_DEBUG
);
9693 /* While branches must always occur at the end of an IT block,
9694 there are a few other things that can cause us to terminate
9695 the TB in the middel of an IT block:
9696 - Exception generating instructions (bkpt, swi, undefined).
9698 - Hardware watchpoints.
9699 Hardware breakpoints have already been handled and skip this code.
9701 gen_set_condexec(dc
);
9702 switch(dc
->is_jmp
) {
9704 gen_goto_tb(dc
, 1, dc
->pc
);
9709 /* indicate that the hash table must be used to find the next TB */
9713 /* nothing more to generate */
9719 gen_exception(EXCP_SWI
);
9723 gen_set_label(dc
->condlabel
);
9724 gen_set_condexec(dc
);
9725 gen_goto_tb(dc
, 1, dc
->pc
);
9731 gen_icount_end(tb
, num_insns
);
9732 *gen_opc_ptr
= INDEX_op_end
;
9735 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
9736 qemu_log("----------------\n");
9737 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
9738 log_target_disas(pc_start
, dc
->pc
- pc_start
, dc
->thumb
);
9743 j
= gen_opc_ptr
- gen_opc_buf
;
9746 gen_opc_instr_start
[lj
++] = 0;
9748 tb
->size
= dc
->pc
- pc_start
;
9749 tb
->icount
= num_insns
;
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9768 void cpu_dump_state(CPUState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
9778 /* ??? This assumes float64 and double have the same layout.
9779 Oh well, it's only debug dumps. */
9788 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
9790 cpu_fprintf(f
, "\n");
9792 cpu_fprintf(f
, " ");
9794 psr
= cpsr_read(env
);
9795 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
9797 psr
& (1 << 31) ? 'N' : '-',
9798 psr
& (1 << 30) ? 'Z' : '-',
9799 psr
& (1 << 29) ? 'C' : '-',
9800 psr
& (1 << 28) ? 'V' : '-',
9801 psr
& CPSR_T
? 'T' : 'A',
9802 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
9805 for (i
= 0; i
< 16; i
++) {
9806 d
.d
= env
->vfp
.regs
[i
];
9810 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9811 i
* 2, (int)s0
.i
, s0
.s
,
9812 i
* 2 + 1, (int)s1
.i
, s1
.s
,
9813 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
9816 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}