4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
45 /* internal defines */
46 typedef struct DisasContext
{
49 /* Nonzero if this instruction has been conditionally skipped. */
51 /* The label that will be jumped to when the instruction is skipped. */
53 /* Thumb-2 condtional execution bits. */
56 struct TranslationBlock
*tb
;
57 int singlestep_enabled
;
59 #if !defined(CONFIG_USER_ONLY)
67 static uint32_t gen_opc_condexec_bits
[OPC_BUF_SIZE
];
69 #if defined(CONFIG_USER_ONLY)
72 #define IS_USER(s) (s->user)
75 /* These instructions trap after executing, so defer them until after the
76 conditional executions state has been updated. */
80 static TCGv_ptr cpu_env
;
81 /* We reuse the same 64-bit temporaries for efficiency. */
82 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
83 static TCGv_i32 cpu_R
[16];
84 static TCGv_i32 cpu_exclusive_addr
;
85 static TCGv_i32 cpu_exclusive_val
;
86 static TCGv_i32 cpu_exclusive_high
;
87 #ifdef CONFIG_USER_ONLY
88 static TCGv_i32 cpu_exclusive_test
;
89 static TCGv_i32 cpu_exclusive_info
;
92 /* FIXME: These should be removed. */
93 static TCGv cpu_F0s
, cpu_F1s
;
94 static TCGv_i64 cpu_F0d
, cpu_F1d
;
96 #include "gen-icount.h"
98 static const char *regnames
[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
102 /* initialize TCG globals. */
103 void arm_translate_init(void)
107 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
109 for (i
= 0; i
< 16; i
++) {
110 cpu_R
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
111 offsetof(CPUState
, regs
[i
]),
114 cpu_exclusive_addr
= tcg_global_mem_new_i32(TCG_AREG0
,
115 offsetof(CPUState
, exclusive_addr
), "exclusive_addr");
116 cpu_exclusive_val
= tcg_global_mem_new_i32(TCG_AREG0
,
117 offsetof(CPUState
, exclusive_val
), "exclusive_val");
118 cpu_exclusive_high
= tcg_global_mem_new_i32(TCG_AREG0
,
119 offsetof(CPUState
, exclusive_high
), "exclusive_high");
120 #ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUState
, exclusive_test
), "exclusive_test");
123 cpu_exclusive_info
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUState
, exclusive_info
), "exclusive_info");
131 static inline TCGv
load_cpu_offset(int offset
)
133 TCGv tmp
= tcg_temp_new_i32();
134 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
138 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
140 static inline void store_cpu_offset(TCGv var
, int offset
)
142 tcg_gen_st_i32(var
, cpu_env
, offset
);
143 tcg_temp_free_i32(var
);
146 #define store_cpu_field(var, name) \
147 store_cpu_offset(var, offsetof(CPUState, name))
149 /* Set a variable to the value of a CPU register. */
150 static void load_reg_var(DisasContext
*s
, TCGv var
, int reg
)
154 /* normaly, since we updated PC, we need only to add one insn */
156 addr
= (long)s
->pc
+ 2;
158 addr
= (long)s
->pc
+ 4;
159 tcg_gen_movi_i32(var
, addr
);
161 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
165 /* Create a new temporary and set it to the value of a CPU register. */
166 static inline TCGv
load_reg(DisasContext
*s
, int reg
)
168 TCGv tmp
= tcg_temp_new_i32();
169 load_reg_var(s
, tmp
, reg
);
173 /* Set a CPU register. The source must be a temporary and will be
175 static void store_reg(DisasContext
*s
, int reg
, TCGv var
)
178 tcg_gen_andi_i32(var
, var
, ~1);
179 s
->is_jmp
= DISAS_JUMP
;
181 tcg_gen_mov_i32(cpu_R
[reg
], var
);
182 tcg_temp_free_i32(var
);
185 /* Value extensions. */
186 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
187 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
188 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
189 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
191 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
192 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
195 static inline void gen_set_cpsr(TCGv var
, uint32_t mask
)
197 TCGv tmp_mask
= tcg_const_i32(mask
);
198 gen_helper_cpsr_write(var
, tmp_mask
);
199 tcg_temp_free_i32(tmp_mask
);
201 /* Set NZCV flags from the high 4 bits of var. */
202 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
204 static void gen_exception(int excp
)
206 TCGv tmp
= tcg_temp_new_i32();
207 tcg_gen_movi_i32(tmp
, excp
);
208 gen_helper_exception(tmp
);
209 tcg_temp_free_i32(tmp
);
212 static void gen_smul_dual(TCGv a
, TCGv b
)
214 TCGv tmp1
= tcg_temp_new_i32();
215 TCGv tmp2
= tcg_temp_new_i32();
216 tcg_gen_ext16s_i32(tmp1
, a
);
217 tcg_gen_ext16s_i32(tmp2
, b
);
218 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
219 tcg_temp_free_i32(tmp2
);
220 tcg_gen_sari_i32(a
, a
, 16);
221 tcg_gen_sari_i32(b
, b
, 16);
222 tcg_gen_mul_i32(b
, b
, a
);
223 tcg_gen_mov_i32(a
, tmp1
);
224 tcg_temp_free_i32(tmp1
);
227 /* Byteswap each halfword. */
228 static void gen_rev16(TCGv var
)
230 TCGv tmp
= tcg_temp_new_i32();
231 tcg_gen_shri_i32(tmp
, var
, 8);
232 tcg_gen_andi_i32(tmp
, tmp
, 0x00ff00ff);
233 tcg_gen_shli_i32(var
, var
, 8);
234 tcg_gen_andi_i32(var
, var
, 0xff00ff00);
235 tcg_gen_or_i32(var
, var
, tmp
);
236 tcg_temp_free_i32(tmp
);
239 /* Byteswap low halfword and sign extend. */
240 static void gen_revsh(TCGv var
)
242 tcg_gen_ext16u_i32(var
, var
);
243 tcg_gen_bswap16_i32(var
, var
);
244 tcg_gen_ext16s_i32(var
, var
);
247 /* Unsigned bitfield extract. */
248 static void gen_ubfx(TCGv var
, int shift
, uint32_t mask
)
251 tcg_gen_shri_i32(var
, var
, shift
);
252 tcg_gen_andi_i32(var
, var
, mask
);
255 /* Signed bitfield extract. */
256 static void gen_sbfx(TCGv var
, int shift
, int width
)
261 tcg_gen_sari_i32(var
, var
, shift
);
262 if (shift
+ width
< 32) {
263 signbit
= 1u << (width
- 1);
264 tcg_gen_andi_i32(var
, var
, (1u << width
) - 1);
265 tcg_gen_xori_i32(var
, var
, signbit
);
266 tcg_gen_subi_i32(var
, var
, signbit
);
270 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
271 static void gen_bfi(TCGv dest
, TCGv base
, TCGv val
, int shift
, uint32_t mask
)
273 tcg_gen_andi_i32(val
, val
, mask
);
274 tcg_gen_shli_i32(val
, val
, shift
);
275 tcg_gen_andi_i32(base
, base
, ~(mask
<< shift
));
276 tcg_gen_or_i32(dest
, base
, val
);
279 /* Return (b << 32) + a. Mark inputs as dead */
280 static TCGv_i64
gen_addq_msw(TCGv_i64 a
, TCGv b
)
282 TCGv_i64 tmp64
= tcg_temp_new_i64();
284 tcg_gen_extu_i32_i64(tmp64
, b
);
285 tcg_temp_free_i32(b
);
286 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
287 tcg_gen_add_i64(a
, tmp64
, a
);
289 tcg_temp_free_i64(tmp64
);
293 /* Return (b << 32) - a. Mark inputs as dead. */
294 static TCGv_i64
gen_subq_msw(TCGv_i64 a
, TCGv b
)
296 TCGv_i64 tmp64
= tcg_temp_new_i64();
298 tcg_gen_extu_i32_i64(tmp64
, b
);
299 tcg_temp_free_i32(b
);
300 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
301 tcg_gen_sub_i64(a
, tmp64
, a
);
303 tcg_temp_free_i64(tmp64
);
307 /* FIXME: Most targets have native widening multiplication.
308 It would be good to use that instead of a full wide multiply. */
309 /* 32x32->64 multiply. Marks inputs as dead. */
310 static TCGv_i64
gen_mulu_i64_i32(TCGv a
, TCGv b
)
312 TCGv_i64 tmp1
= tcg_temp_new_i64();
313 TCGv_i64 tmp2
= tcg_temp_new_i64();
315 tcg_gen_extu_i32_i64(tmp1
, a
);
316 tcg_temp_free_i32(a
);
317 tcg_gen_extu_i32_i64(tmp2
, b
);
318 tcg_temp_free_i32(b
);
319 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
320 tcg_temp_free_i64(tmp2
);
324 static TCGv_i64
gen_muls_i64_i32(TCGv a
, TCGv b
)
326 TCGv_i64 tmp1
= tcg_temp_new_i64();
327 TCGv_i64 tmp2
= tcg_temp_new_i64();
329 tcg_gen_ext_i32_i64(tmp1
, a
);
330 tcg_temp_free_i32(a
);
331 tcg_gen_ext_i32_i64(tmp2
, b
);
332 tcg_temp_free_i32(b
);
333 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
334 tcg_temp_free_i64(tmp2
);
338 /* Swap low and high halfwords. */
339 static void gen_swap_half(TCGv var
)
341 TCGv tmp
= tcg_temp_new_i32();
342 tcg_gen_shri_i32(tmp
, var
, 16);
343 tcg_gen_shli_i32(var
, var
, 16);
344 tcg_gen_or_i32(var
, var
, tmp
);
345 tcg_temp_free_i32(tmp
);
348 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
349 tmp = (t0 ^ t1) & 0x8000;
352 t0 = (t0 + t1) ^ tmp;
355 static void gen_add16(TCGv t0
, TCGv t1
)
357 TCGv tmp
= tcg_temp_new_i32();
358 tcg_gen_xor_i32(tmp
, t0
, t1
);
359 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
360 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
361 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
362 tcg_gen_add_i32(t0
, t0
, t1
);
363 tcg_gen_xor_i32(t0
, t0
, tmp
);
364 tcg_temp_free_i32(tmp
);
365 tcg_temp_free_i32(t1
);
368 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
370 /* Set CF to the top bit of var. */
371 static void gen_set_CF_bit31(TCGv var
)
373 TCGv tmp
= tcg_temp_new_i32();
374 tcg_gen_shri_i32(tmp
, var
, 31);
376 tcg_temp_free_i32(tmp
);
379 /* Set N and Z flags from var. */
380 static inline void gen_logic_CC(TCGv var
)
382 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, NF
));
383 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, ZF
));
387 static void gen_adc(TCGv t0
, TCGv t1
)
390 tcg_gen_add_i32(t0
, t0
, t1
);
391 tmp
= load_cpu_field(CF
);
392 tcg_gen_add_i32(t0
, t0
, tmp
);
393 tcg_temp_free_i32(tmp
);
396 /* dest = T0 + T1 + CF. */
397 static void gen_add_carry(TCGv dest
, TCGv t0
, TCGv t1
)
400 tcg_gen_add_i32(dest
, t0
, t1
);
401 tmp
= load_cpu_field(CF
);
402 tcg_gen_add_i32(dest
, dest
, tmp
);
403 tcg_temp_free_i32(tmp
);
406 /* dest = T0 - T1 + CF - 1. */
407 static void gen_sub_carry(TCGv dest
, TCGv t0
, TCGv t1
)
410 tcg_gen_sub_i32(dest
, t0
, t1
);
411 tmp
= load_cpu_field(CF
);
412 tcg_gen_add_i32(dest
, dest
, tmp
);
413 tcg_gen_subi_i32(dest
, dest
, 1);
414 tcg_temp_free_i32(tmp
);
417 /* FIXME: Implement this natively. */
418 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
420 static void shifter_out_im(TCGv var
, int shift
)
422 TCGv tmp
= tcg_temp_new_i32();
424 tcg_gen_andi_i32(tmp
, var
, 1);
426 tcg_gen_shri_i32(tmp
, var
, shift
);
428 tcg_gen_andi_i32(tmp
, tmp
, 1);
431 tcg_temp_free_i32(tmp
);
434 /* Shift by immediate. Includes special handling for shift == 0. */
435 static inline void gen_arm_shift_im(TCGv var
, int shiftop
, int shift
, int flags
)
441 shifter_out_im(var
, 32 - shift
);
442 tcg_gen_shli_i32(var
, var
, shift
);
448 tcg_gen_shri_i32(var
, var
, 31);
451 tcg_gen_movi_i32(var
, 0);
454 shifter_out_im(var
, shift
- 1);
455 tcg_gen_shri_i32(var
, var
, shift
);
462 shifter_out_im(var
, shift
- 1);
465 tcg_gen_sari_i32(var
, var
, shift
);
467 case 3: /* ROR/RRX */
470 shifter_out_im(var
, shift
- 1);
471 tcg_gen_rotri_i32(var
, var
, shift
); break;
473 TCGv tmp
= load_cpu_field(CF
);
475 shifter_out_im(var
, 0);
476 tcg_gen_shri_i32(var
, var
, 1);
477 tcg_gen_shli_i32(tmp
, tmp
, 31);
478 tcg_gen_or_i32(var
, var
, tmp
);
479 tcg_temp_free_i32(tmp
);
484 static inline void gen_arm_shift_reg(TCGv var
, int shiftop
,
485 TCGv shift
, int flags
)
489 case 0: gen_helper_shl_cc(var
, var
, shift
); break;
490 case 1: gen_helper_shr_cc(var
, var
, shift
); break;
491 case 2: gen_helper_sar_cc(var
, var
, shift
); break;
492 case 3: gen_helper_ror_cc(var
, var
, shift
); break;
496 case 0: gen_helper_shl(var
, var
, shift
); break;
497 case 1: gen_helper_shr(var
, var
, shift
); break;
498 case 2: gen_helper_sar(var
, var
, shift
); break;
499 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
500 tcg_gen_rotr_i32(var
, var
, shift
); break;
503 tcg_temp_free_i32(shift
);
506 #define PAS_OP(pfx) \
508 case 0: gen_pas_helper(glue(pfx,add16)); break; \
509 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
510 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
511 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
512 case 4: gen_pas_helper(glue(pfx,add8)); break; \
513 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
515 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
520 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
522 tmp
= tcg_temp_new_ptr();
523 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
525 tcg_temp_free_ptr(tmp
);
528 tmp
= tcg_temp_new_ptr();
529 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
531 tcg_temp_free_ptr(tmp
);
533 #undef gen_pas_helper
534 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
547 #undef gen_pas_helper
552 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
553 #define PAS_OP(pfx) \
555 case 0: gen_pas_helper(glue(pfx,add8)); break; \
556 case 1: gen_pas_helper(glue(pfx,add16)); break; \
557 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
558 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
559 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
560 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
562 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
567 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
569 tmp
= tcg_temp_new_ptr();
570 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
572 tcg_temp_free_ptr(tmp
);
575 tmp
= tcg_temp_new_ptr();
576 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
578 tcg_temp_free_ptr(tmp
);
580 #undef gen_pas_helper
581 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
594 #undef gen_pas_helper
599 static void gen_test_cc(int cc
, int label
)
607 tmp
= load_cpu_field(ZF
);
608 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
611 tmp
= load_cpu_field(ZF
);
612 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
615 tmp
= load_cpu_field(CF
);
616 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
619 tmp
= load_cpu_field(CF
);
620 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
623 tmp
= load_cpu_field(NF
);
624 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
627 tmp
= load_cpu_field(NF
);
628 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
631 tmp
= load_cpu_field(VF
);
632 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
635 tmp
= load_cpu_field(VF
);
636 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
638 case 8: /* hi: C && !Z */
639 inv
= gen_new_label();
640 tmp
= load_cpu_field(CF
);
641 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
642 tcg_temp_free_i32(tmp
);
643 tmp
= load_cpu_field(ZF
);
644 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
647 case 9: /* ls: !C || Z */
648 tmp
= load_cpu_field(CF
);
649 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
650 tcg_temp_free_i32(tmp
);
651 tmp
= load_cpu_field(ZF
);
652 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
654 case 10: /* ge: N == V -> N ^ V == 0 */
655 tmp
= load_cpu_field(VF
);
656 tmp2
= load_cpu_field(NF
);
657 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
658 tcg_temp_free_i32(tmp2
);
659 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
661 case 11: /* lt: N != V -> N ^ V != 0 */
662 tmp
= load_cpu_field(VF
);
663 tmp2
= load_cpu_field(NF
);
664 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
665 tcg_temp_free_i32(tmp2
);
666 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
668 case 12: /* gt: !Z && N == V */
669 inv
= gen_new_label();
670 tmp
= load_cpu_field(ZF
);
671 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
672 tcg_temp_free_i32(tmp
);
673 tmp
= load_cpu_field(VF
);
674 tmp2
= load_cpu_field(NF
);
675 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
676 tcg_temp_free_i32(tmp2
);
677 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
680 case 13: /* le: Z || N != V */
681 tmp
= load_cpu_field(ZF
);
682 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
683 tcg_temp_free_i32(tmp
);
684 tmp
= load_cpu_field(VF
);
685 tmp2
= load_cpu_field(NF
);
686 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
687 tcg_temp_free_i32(tmp2
);
688 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
691 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
694 tcg_temp_free_i32(tmp
);
697 static const uint8_t table_logic_cc
[16] = {
716 /* Set PC and Thumb state from an immediate address. */
717 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
721 s
->is_jmp
= DISAS_UPDATE
;
722 if (s
->thumb
!= (addr
& 1)) {
723 tmp
= tcg_temp_new_i32();
724 tcg_gen_movi_i32(tmp
, addr
& 1);
725 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, thumb
));
726 tcg_temp_free_i32(tmp
);
728 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
731 /* Set PC and Thumb state from var. var is marked as dead. */
732 static inline void gen_bx(DisasContext
*s
, TCGv var
)
734 s
->is_jmp
= DISAS_UPDATE
;
735 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
736 tcg_gen_andi_i32(var
, var
, 1);
737 store_cpu_field(var
, thumb
);
740 /* Variant of store_reg which uses branch&exchange logic when storing
741 to r15 in ARM architecture v7 and above. The source must be a temporary
742 and will be marked as dead. */
743 static inline void store_reg_bx(CPUState
*env
, DisasContext
*s
,
746 if (reg
== 15 && ENABLE_ARCH_7
) {
749 store_reg(s
, reg
, var
);
753 static inline TCGv
gen_ld8s(TCGv addr
, int index
)
755 TCGv tmp
= tcg_temp_new_i32();
756 tcg_gen_qemu_ld8s(tmp
, addr
, index
);
759 static inline TCGv
gen_ld8u(TCGv addr
, int index
)
761 TCGv tmp
= tcg_temp_new_i32();
762 tcg_gen_qemu_ld8u(tmp
, addr
, index
);
765 static inline TCGv
gen_ld16s(TCGv addr
, int index
)
767 TCGv tmp
= tcg_temp_new_i32();
768 tcg_gen_qemu_ld16s(tmp
, addr
, index
);
771 static inline TCGv
gen_ld16u(TCGv addr
, int index
)
773 TCGv tmp
= tcg_temp_new_i32();
774 tcg_gen_qemu_ld16u(tmp
, addr
, index
);
777 static inline TCGv
gen_ld32(TCGv addr
, int index
)
779 TCGv tmp
= tcg_temp_new_i32();
780 tcg_gen_qemu_ld32u(tmp
, addr
, index
);
783 static inline TCGv_i64
gen_ld64(TCGv addr
, int index
)
785 TCGv_i64 tmp
= tcg_temp_new_i64();
786 tcg_gen_qemu_ld64(tmp
, addr
, index
);
789 static inline void gen_st8(TCGv val
, TCGv addr
, int index
)
791 tcg_gen_qemu_st8(val
, addr
, index
);
792 tcg_temp_free_i32(val
);
794 static inline void gen_st16(TCGv val
, TCGv addr
, int index
)
796 tcg_gen_qemu_st16(val
, addr
, index
);
797 tcg_temp_free_i32(val
);
799 static inline void gen_st32(TCGv val
, TCGv addr
, int index
)
801 tcg_gen_qemu_st32(val
, addr
, index
);
802 tcg_temp_free_i32(val
);
804 static inline void gen_st64(TCGv_i64 val
, TCGv addr
, int index
)
806 tcg_gen_qemu_st64(val
, addr
, index
);
807 tcg_temp_free_i64(val
);
810 static inline void gen_set_pc_im(uint32_t val
)
812 tcg_gen_movi_i32(cpu_R
[15], val
);
815 /* Force a TB lookup after an instruction that changes the CPU state. */
816 static inline void gen_lookup_tb(DisasContext
*s
)
818 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
819 s
->is_jmp
= DISAS_UPDATE
;
822 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
825 int val
, rm
, shift
, shiftop
;
828 if (!(insn
& (1 << 25))) {
831 if (!(insn
& (1 << 23)))
834 tcg_gen_addi_i32(var
, var
, val
);
838 shift
= (insn
>> 7) & 0x1f;
839 shiftop
= (insn
>> 5) & 3;
840 offset
= load_reg(s
, rm
);
841 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
842 if (!(insn
& (1 << 23)))
843 tcg_gen_sub_i32(var
, var
, offset
);
845 tcg_gen_add_i32(var
, var
, offset
);
846 tcg_temp_free_i32(offset
);
850 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
856 if (insn
& (1 << 22)) {
858 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
859 if (!(insn
& (1 << 23)))
863 tcg_gen_addi_i32(var
, var
, val
);
867 tcg_gen_addi_i32(var
, var
, extra
);
869 offset
= load_reg(s
, rm
);
870 if (!(insn
& (1 << 23)))
871 tcg_gen_sub_i32(var
, var
, offset
);
873 tcg_gen_add_i32(var
, var
, offset
);
874 tcg_temp_free_i32(offset
);
878 #define VFP_OP2(name) \
879 static inline void gen_vfp_##name(int dp) \
882 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
884 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
894 static inline void gen_vfp_abs(int dp
)
897 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
899 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
902 static inline void gen_vfp_neg(int dp
)
905 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
907 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
910 static inline void gen_vfp_sqrt(int dp
)
913 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
915 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
918 static inline void gen_vfp_cmp(int dp
)
921 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
923 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
926 static inline void gen_vfp_cmpe(int dp
)
929 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
931 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
934 static inline void gen_vfp_F1_ld0(int dp
)
937 tcg_gen_movi_i64(cpu_F1d
, 0);
939 tcg_gen_movi_i32(cpu_F1s
, 0);
942 static inline void gen_vfp_uito(int dp
)
945 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
947 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
950 static inline void gen_vfp_sito(int dp
)
953 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
955 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
958 static inline void gen_vfp_toui(int dp
)
961 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
963 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
966 static inline void gen_vfp_touiz(int dp
)
969 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
971 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
974 static inline void gen_vfp_tosi(int dp
)
977 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
979 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
982 static inline void gen_vfp_tosiz(int dp
)
985 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
987 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
990 #define VFP_GEN_FIX(name) \
991 static inline void gen_vfp_##name(int dp, int shift) \
993 TCGv tmp_shift = tcg_const_i32(shift); \
995 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
997 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
998 tcg_temp_free_i32(tmp_shift); \
1010 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv addr
)
1013 tcg_gen_qemu_ld64(cpu_F0d
, addr
, IS_USER(s
));
1015 tcg_gen_qemu_ld32u(cpu_F0s
, addr
, IS_USER(s
));
1018 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv addr
)
1021 tcg_gen_qemu_st64(cpu_F0d
, addr
, IS_USER(s
));
1023 tcg_gen_qemu_st32(cpu_F0s
, addr
, IS_USER(s
));
1027 vfp_reg_offset (int dp
, int reg
)
1030 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1032 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1033 + offsetof(CPU_DoubleU
, l
.upper
);
1035 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1036 + offsetof(CPU_DoubleU
, l
.lower
);
1040 /* Return the offset of a 32-bit piece of a NEON register.
1041 zero is the least significant end of the register. */
1043 neon_reg_offset (int reg
, int n
)
1047 return vfp_reg_offset(0, sreg
);
1050 static TCGv
neon_load_reg(int reg
, int pass
)
1052 TCGv tmp
= tcg_temp_new_i32();
1053 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1057 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1059 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1060 tcg_temp_free_i32(var
);
1063 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1065 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1068 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1070 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1073 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1074 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1075 #define tcg_gen_st_f32 tcg_gen_st_i32
1076 #define tcg_gen_st_f64 tcg_gen_st_i64
1078 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1081 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1083 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1086 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1089 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1091 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1094 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1097 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1099 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1102 #define ARM_CP_RW_BIT (1 << 20)
1104 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1106 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1109 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1111 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1114 static inline TCGv
iwmmxt_load_creg(int reg
)
1116 TCGv var
= tcg_temp_new_i32();
1117 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1121 static inline void iwmmxt_store_creg(int reg
, TCGv var
)
1123 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1124 tcg_temp_free_i32(var
);
1127 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1129 iwmmxt_store_reg(cpu_M0
, rn
);
1132 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1134 iwmmxt_load_reg(cpu_M0
, rn
);
1137 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1139 iwmmxt_load_reg(cpu_V1
, rn
);
1140 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1143 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1145 iwmmxt_load_reg(cpu_V1
, rn
);
1146 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1149 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1151 iwmmxt_load_reg(cpu_V1
, rn
);
1152 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1155 #define IWMMXT_OP(name) \
1156 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1158 iwmmxt_load_reg(cpu_V1, rn); \
1159 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1162 #define IWMMXT_OP_ENV(name) \
1163 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1165 iwmmxt_load_reg(cpu_V1, rn); \
1166 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1169 #define IWMMXT_OP_ENV_SIZE(name) \
1170 IWMMXT_OP_ENV(name##b) \
1171 IWMMXT_OP_ENV(name##w) \
1172 IWMMXT_OP_ENV(name##l)
1174 #define IWMMXT_OP_ENV1(name) \
1175 static inline void gen_op_iwmmxt_##name##_M0(void) \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1191 IWMMXT_OP_ENV_SIZE(unpackl
)
1192 IWMMXT_OP_ENV_SIZE(unpackh
)
1194 IWMMXT_OP_ENV1(unpacklub
)
1195 IWMMXT_OP_ENV1(unpackluw
)
1196 IWMMXT_OP_ENV1(unpacklul
)
1197 IWMMXT_OP_ENV1(unpackhub
)
1198 IWMMXT_OP_ENV1(unpackhuw
)
1199 IWMMXT_OP_ENV1(unpackhul
)
1200 IWMMXT_OP_ENV1(unpacklsb
)
1201 IWMMXT_OP_ENV1(unpacklsw
)
1202 IWMMXT_OP_ENV1(unpacklsl
)
1203 IWMMXT_OP_ENV1(unpackhsb
)
1204 IWMMXT_OP_ENV1(unpackhsw
)
1205 IWMMXT_OP_ENV1(unpackhsl
)
1207 IWMMXT_OP_ENV_SIZE(cmpeq
)
1208 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1209 IWMMXT_OP_ENV_SIZE(cmpgts
)
1211 IWMMXT_OP_ENV_SIZE(mins
)
1212 IWMMXT_OP_ENV_SIZE(minu
)
1213 IWMMXT_OP_ENV_SIZE(maxs
)
1214 IWMMXT_OP_ENV_SIZE(maxu
)
1216 IWMMXT_OP_ENV_SIZE(subn
)
1217 IWMMXT_OP_ENV_SIZE(addn
)
1218 IWMMXT_OP_ENV_SIZE(subu
)
1219 IWMMXT_OP_ENV_SIZE(addu
)
1220 IWMMXT_OP_ENV_SIZE(subs
)
1221 IWMMXT_OP_ENV_SIZE(adds
)
1223 IWMMXT_OP_ENV(avgb0
)
1224 IWMMXT_OP_ENV(avgb1
)
1225 IWMMXT_OP_ENV(avgw0
)
1226 IWMMXT_OP_ENV(avgw1
)
1230 IWMMXT_OP_ENV(packuw
)
1231 IWMMXT_OP_ENV(packul
)
1232 IWMMXT_OP_ENV(packuq
)
1233 IWMMXT_OP_ENV(packsw
)
1234 IWMMXT_OP_ENV(packsl
)
1235 IWMMXT_OP_ENV(packsq
)
1237 static void gen_op_iwmmxt_set_mup(void)
1240 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1241 tcg_gen_ori_i32(tmp
, tmp
, 2);
1242 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1245 static void gen_op_iwmmxt_set_cup(void)
1248 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1249 tcg_gen_ori_i32(tmp
, tmp
, 1);
1250 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1253 static void gen_op_iwmmxt_setpsr_nz(void)
1255 TCGv tmp
= tcg_temp_new_i32();
1256 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1257 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1260 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1262 iwmmxt_load_reg(cpu_V1
, rn
);
1263 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1264 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1267 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
, TCGv dest
)
1273 rd
= (insn
>> 16) & 0xf;
1274 tmp
= load_reg(s
, rd
);
1276 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1277 if (insn
& (1 << 24)) {
1279 if (insn
& (1 << 23))
1280 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1282 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1283 tcg_gen_mov_i32(dest
, tmp
);
1284 if (insn
& (1 << 21))
1285 store_reg(s
, rd
, tmp
);
1287 tcg_temp_free_i32(tmp
);
1288 } else if (insn
& (1 << 21)) {
1290 tcg_gen_mov_i32(dest
, tmp
);
1291 if (insn
& (1 << 23))
1292 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1294 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1295 store_reg(s
, rd
, tmp
);
1296 } else if (!(insn
& (1 << 23)))
1301 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv dest
)
1303 int rd
= (insn
>> 0) & 0xf;
1306 if (insn
& (1 << 8)) {
1307 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1310 tmp
= iwmmxt_load_creg(rd
);
1313 tmp
= tcg_temp_new_i32();
1314 iwmmxt_load_reg(cpu_V0
, rd
);
1315 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
1317 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1318 tcg_gen_mov_i32(dest
, tmp
);
1319 tcg_temp_free_i32(tmp
);
1323 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1324 (ie. an undefined instruction). */
1325 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1328 int rdhi
, rdlo
, rd0
, rd1
, i
;
1330 TCGv tmp
, tmp2
, tmp3
;
1332 if ((insn
& 0x0e000e00) == 0x0c000000) {
1333 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1335 rdlo
= (insn
>> 12) & 0xf;
1336 rdhi
= (insn
>> 16) & 0xf;
1337 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1338 iwmmxt_load_reg(cpu_V0
, wrd
);
1339 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1340 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1341 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1342 } else { /* TMCRR */
1343 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1344 iwmmxt_store_reg(cpu_V0
, wrd
);
1345 gen_op_iwmmxt_set_mup();
1350 wrd
= (insn
>> 12) & 0xf;
1351 addr
= tcg_temp_new_i32();
1352 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1353 tcg_temp_free_i32(addr
);
1356 if (insn
& ARM_CP_RW_BIT
) {
1357 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1358 tmp
= tcg_temp_new_i32();
1359 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1360 iwmmxt_store_creg(wrd
, tmp
);
1363 if (insn
& (1 << 8)) {
1364 if (insn
& (1 << 22)) { /* WLDRD */
1365 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1367 } else { /* WLDRW wRd */
1368 tmp
= gen_ld32(addr
, IS_USER(s
));
1371 if (insn
& (1 << 22)) { /* WLDRH */
1372 tmp
= gen_ld16u(addr
, IS_USER(s
));
1373 } else { /* WLDRB */
1374 tmp
= gen_ld8u(addr
, IS_USER(s
));
1378 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1379 tcg_temp_free_i32(tmp
);
1381 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1384 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1385 tmp
= iwmmxt_load_creg(wrd
);
1386 gen_st32(tmp
, addr
, IS_USER(s
));
1388 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1389 tmp
= tcg_temp_new_i32();
1390 if (insn
& (1 << 8)) {
1391 if (insn
& (1 << 22)) { /* WSTRD */
1392 tcg_temp_free_i32(tmp
);
1393 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1394 } else { /* WSTRW wRd */
1395 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1396 gen_st32(tmp
, addr
, IS_USER(s
));
1399 if (insn
& (1 << 22)) { /* WSTRH */
1400 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1401 gen_st16(tmp
, addr
, IS_USER(s
));
1402 } else { /* WSTRB */
1403 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1404 gen_st8(tmp
, addr
, IS_USER(s
));
1409 tcg_temp_free_i32(addr
);
1413 if ((insn
& 0x0f000000) != 0x0e000000)
1416 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1417 case 0x000: /* WOR */
1418 wrd
= (insn
>> 12) & 0xf;
1419 rd0
= (insn
>> 0) & 0xf;
1420 rd1
= (insn
>> 16) & 0xf;
1421 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1422 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1423 gen_op_iwmmxt_setpsr_nz();
1424 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1425 gen_op_iwmmxt_set_mup();
1426 gen_op_iwmmxt_set_cup();
1428 case 0x011: /* TMCR */
1431 rd
= (insn
>> 12) & 0xf;
1432 wrd
= (insn
>> 16) & 0xf;
1434 case ARM_IWMMXT_wCID
:
1435 case ARM_IWMMXT_wCASF
:
1437 case ARM_IWMMXT_wCon
:
1438 gen_op_iwmmxt_set_cup();
1440 case ARM_IWMMXT_wCSSF
:
1441 tmp
= iwmmxt_load_creg(wrd
);
1442 tmp2
= load_reg(s
, rd
);
1443 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1444 tcg_temp_free_i32(tmp2
);
1445 iwmmxt_store_creg(wrd
, tmp
);
1447 case ARM_IWMMXT_wCGR0
:
1448 case ARM_IWMMXT_wCGR1
:
1449 case ARM_IWMMXT_wCGR2
:
1450 case ARM_IWMMXT_wCGR3
:
1451 gen_op_iwmmxt_set_cup();
1452 tmp
= load_reg(s
, rd
);
1453 iwmmxt_store_creg(wrd
, tmp
);
1459 case 0x100: /* WXOR */
1460 wrd
= (insn
>> 12) & 0xf;
1461 rd0
= (insn
>> 0) & 0xf;
1462 rd1
= (insn
>> 16) & 0xf;
1463 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1464 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1465 gen_op_iwmmxt_setpsr_nz();
1466 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1467 gen_op_iwmmxt_set_mup();
1468 gen_op_iwmmxt_set_cup();
1470 case 0x111: /* TMRC */
1473 rd
= (insn
>> 12) & 0xf;
1474 wrd
= (insn
>> 16) & 0xf;
1475 tmp
= iwmmxt_load_creg(wrd
);
1476 store_reg(s
, rd
, tmp
);
1478 case 0x300: /* WANDN */
1479 wrd
= (insn
>> 12) & 0xf;
1480 rd0
= (insn
>> 0) & 0xf;
1481 rd1
= (insn
>> 16) & 0xf;
1482 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1483 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1484 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1485 gen_op_iwmmxt_setpsr_nz();
1486 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1487 gen_op_iwmmxt_set_mup();
1488 gen_op_iwmmxt_set_cup();
1490 case 0x200: /* WAND */
1491 wrd
= (insn
>> 12) & 0xf;
1492 rd0
= (insn
>> 0) & 0xf;
1493 rd1
= (insn
>> 16) & 0xf;
1494 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1501 case 0x810: case 0xa10: /* WMADD */
1502 wrd
= (insn
>> 12) & 0xf;
1503 rd0
= (insn
>> 0) & 0xf;
1504 rd1
= (insn
>> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1506 if (insn
& (1 << 21))
1507 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1509 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1510 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1511 gen_op_iwmmxt_set_mup();
1513 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1514 wrd
= (insn
>> 12) & 0xf;
1515 rd0
= (insn
>> 16) & 0xf;
1516 rd1
= (insn
>> 0) & 0xf;
1517 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1518 switch ((insn
>> 22) & 3) {
1520 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1523 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1526 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1531 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1535 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1536 wrd
= (insn
>> 12) & 0xf;
1537 rd0
= (insn
>> 16) & 0xf;
1538 rd1
= (insn
>> 0) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1540 switch ((insn
>> 22) & 3) {
1542 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1545 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1548 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1553 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1554 gen_op_iwmmxt_set_mup();
1555 gen_op_iwmmxt_set_cup();
1557 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1558 wrd
= (insn
>> 12) & 0xf;
1559 rd0
= (insn
>> 16) & 0xf;
1560 rd1
= (insn
>> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1562 if (insn
& (1 << 22))
1563 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1565 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1566 if (!(insn
& (1 << 20)))
1567 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1568 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1569 gen_op_iwmmxt_set_mup();
1571 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1572 wrd
= (insn
>> 12) & 0xf;
1573 rd0
= (insn
>> 16) & 0xf;
1574 rd1
= (insn
>> 0) & 0xf;
1575 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1576 if (insn
& (1 << 21)) {
1577 if (insn
& (1 << 20))
1578 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1580 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1582 if (insn
& (1 << 20))
1583 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1585 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1587 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1588 gen_op_iwmmxt_set_mup();
1590 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1591 wrd
= (insn
>> 12) & 0xf;
1592 rd0
= (insn
>> 16) & 0xf;
1593 rd1
= (insn
>> 0) & 0xf;
1594 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1595 if (insn
& (1 << 21))
1596 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1598 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1599 if (!(insn
& (1 << 20))) {
1600 iwmmxt_load_reg(cpu_V1
, wrd
);
1601 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1603 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1604 gen_op_iwmmxt_set_mup();
1606 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1607 wrd
= (insn
>> 12) & 0xf;
1608 rd0
= (insn
>> 16) & 0xf;
1609 rd1
= (insn
>> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1611 switch ((insn
>> 22) & 3) {
1613 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1616 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1619 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1624 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1625 gen_op_iwmmxt_set_mup();
1626 gen_op_iwmmxt_set_cup();
1628 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1629 wrd
= (insn
>> 12) & 0xf;
1630 rd0
= (insn
>> 16) & 0xf;
1631 rd1
= (insn
>> 0) & 0xf;
1632 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1633 if (insn
& (1 << 22)) {
1634 if (insn
& (1 << 20))
1635 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1637 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1639 if (insn
& (1 << 20))
1640 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1642 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1644 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1645 gen_op_iwmmxt_set_mup();
1646 gen_op_iwmmxt_set_cup();
1648 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1649 wrd
= (insn
>> 12) & 0xf;
1650 rd0
= (insn
>> 16) & 0xf;
1651 rd1
= (insn
>> 0) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1653 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1654 tcg_gen_andi_i32(tmp
, tmp
, 7);
1655 iwmmxt_load_reg(cpu_V1
, rd1
);
1656 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1657 tcg_temp_free_i32(tmp
);
1658 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1659 gen_op_iwmmxt_set_mup();
1661 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1662 if (((insn
>> 6) & 3) == 3)
1664 rd
= (insn
>> 12) & 0xf;
1665 wrd
= (insn
>> 16) & 0xf;
1666 tmp
= load_reg(s
, rd
);
1667 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1668 switch ((insn
>> 6) & 3) {
1670 tmp2
= tcg_const_i32(0xff);
1671 tmp3
= tcg_const_i32((insn
& 7) << 3);
1674 tmp2
= tcg_const_i32(0xffff);
1675 tmp3
= tcg_const_i32((insn
& 3) << 4);
1678 tmp2
= tcg_const_i32(0xffffffff);
1679 tmp3
= tcg_const_i32((insn
& 1) << 5);
1685 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1686 tcg_temp_free(tmp3
);
1687 tcg_temp_free(tmp2
);
1688 tcg_temp_free_i32(tmp
);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1690 gen_op_iwmmxt_set_mup();
1692 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1693 rd
= (insn
>> 12) & 0xf;
1694 wrd
= (insn
>> 16) & 0xf;
1695 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1697 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1698 tmp
= tcg_temp_new_i32();
1699 switch ((insn
>> 22) & 3) {
1701 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1702 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1704 tcg_gen_ext8s_i32(tmp
, tmp
);
1706 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1710 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1711 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1713 tcg_gen_ext16s_i32(tmp
, tmp
);
1715 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1719 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1720 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1723 store_reg(s
, rd
, tmp
);
1725 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1726 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1728 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1729 switch ((insn
>> 22) & 3) {
1731 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1734 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1737 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1740 tcg_gen_shli_i32(tmp
, tmp
, 28);
1742 tcg_temp_free_i32(tmp
);
1744 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1745 if (((insn
>> 6) & 3) == 3)
1747 rd
= (insn
>> 12) & 0xf;
1748 wrd
= (insn
>> 16) & 0xf;
1749 tmp
= load_reg(s
, rd
);
1750 switch ((insn
>> 6) & 3) {
1752 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1755 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1758 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1761 tcg_temp_free_i32(tmp
);
1762 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1763 gen_op_iwmmxt_set_mup();
1765 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1766 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1768 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1769 tmp2
= tcg_temp_new_i32();
1770 tcg_gen_mov_i32(tmp2
, tmp
);
1771 switch ((insn
>> 22) & 3) {
1773 for (i
= 0; i
< 7; i
++) {
1774 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1775 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1779 for (i
= 0; i
< 3; i
++) {
1780 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1781 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1785 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1786 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1790 tcg_temp_free_i32(tmp2
);
1791 tcg_temp_free_i32(tmp
);
1793 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1794 wrd
= (insn
>> 12) & 0xf;
1795 rd0
= (insn
>> 16) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1797 switch ((insn
>> 22) & 3) {
1799 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1802 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1805 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1810 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1811 gen_op_iwmmxt_set_mup();
1813 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1814 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1816 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1817 tmp2
= tcg_temp_new_i32();
1818 tcg_gen_mov_i32(tmp2
, tmp
);
1819 switch ((insn
>> 22) & 3) {
1821 for (i
= 0; i
< 7; i
++) {
1822 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1823 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1827 for (i
= 0; i
< 3; i
++) {
1828 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1829 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1833 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1834 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1838 tcg_temp_free_i32(tmp2
);
1839 tcg_temp_free_i32(tmp
);
1841 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1842 rd
= (insn
>> 12) & 0xf;
1843 rd0
= (insn
>> 16) & 0xf;
1844 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1846 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1847 tmp
= tcg_temp_new_i32();
1848 switch ((insn
>> 22) & 3) {
1850 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1853 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1856 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1859 store_reg(s
, rd
, tmp
);
1861 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1862 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1863 wrd
= (insn
>> 12) & 0xf;
1864 rd0
= (insn
>> 16) & 0xf;
1865 rd1
= (insn
>> 0) & 0xf;
1866 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1867 switch ((insn
>> 22) & 3) {
1869 if (insn
& (1 << 21))
1870 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1872 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1875 if (insn
& (1 << 21))
1876 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1878 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1881 if (insn
& (1 << 21))
1882 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1884 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1889 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1890 gen_op_iwmmxt_set_mup();
1891 gen_op_iwmmxt_set_cup();
1893 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1894 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1895 wrd
= (insn
>> 12) & 0xf;
1896 rd0
= (insn
>> 16) & 0xf;
1897 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1898 switch ((insn
>> 22) & 3) {
1900 if (insn
& (1 << 21))
1901 gen_op_iwmmxt_unpacklsb_M0();
1903 gen_op_iwmmxt_unpacklub_M0();
1906 if (insn
& (1 << 21))
1907 gen_op_iwmmxt_unpacklsw_M0();
1909 gen_op_iwmmxt_unpackluw_M0();
1912 if (insn
& (1 << 21))
1913 gen_op_iwmmxt_unpacklsl_M0();
1915 gen_op_iwmmxt_unpacklul_M0();
1920 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1921 gen_op_iwmmxt_set_mup();
1922 gen_op_iwmmxt_set_cup();
1924 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1925 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1926 wrd
= (insn
>> 12) & 0xf;
1927 rd0
= (insn
>> 16) & 0xf;
1928 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1929 switch ((insn
>> 22) & 3) {
1931 if (insn
& (1 << 21))
1932 gen_op_iwmmxt_unpackhsb_M0();
1934 gen_op_iwmmxt_unpackhub_M0();
1937 if (insn
& (1 << 21))
1938 gen_op_iwmmxt_unpackhsw_M0();
1940 gen_op_iwmmxt_unpackhuw_M0();
1943 if (insn
& (1 << 21))
1944 gen_op_iwmmxt_unpackhsl_M0();
1946 gen_op_iwmmxt_unpackhul_M0();
1951 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1952 gen_op_iwmmxt_set_mup();
1953 gen_op_iwmmxt_set_cup();
1955 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1956 case 0x214: case 0x614: case 0xa14: case 0xe14:
1957 if (((insn
>> 22) & 3) == 0)
1959 wrd
= (insn
>> 12) & 0xf;
1960 rd0
= (insn
>> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1962 tmp
= tcg_temp_new_i32();
1963 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
1964 tcg_temp_free_i32(tmp
);
1967 switch ((insn
>> 22) & 3) {
1969 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1972 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1975 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1978 tcg_temp_free_i32(tmp
);
1979 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1980 gen_op_iwmmxt_set_mup();
1981 gen_op_iwmmxt_set_cup();
1983 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1984 case 0x014: case 0x414: case 0x814: case 0xc14:
1985 if (((insn
>> 22) & 3) == 0)
1987 wrd
= (insn
>> 12) & 0xf;
1988 rd0
= (insn
>> 16) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1990 tmp
= tcg_temp_new_i32();
1991 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
1992 tcg_temp_free_i32(tmp
);
1995 switch ((insn
>> 22) & 3) {
1997 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2000 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2003 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2006 tcg_temp_free_i32(tmp
);
2007 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2008 gen_op_iwmmxt_set_mup();
2009 gen_op_iwmmxt_set_cup();
2011 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2012 case 0x114: case 0x514: case 0x914: case 0xd14:
2013 if (((insn
>> 22) & 3) == 0)
2015 wrd
= (insn
>> 12) & 0xf;
2016 rd0
= (insn
>> 16) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2018 tmp
= tcg_temp_new_i32();
2019 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2020 tcg_temp_free_i32(tmp
);
2023 switch ((insn
>> 22) & 3) {
2025 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2028 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2031 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2034 tcg_temp_free_i32(tmp
);
2035 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2036 gen_op_iwmmxt_set_mup();
2037 gen_op_iwmmxt_set_cup();
2039 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2040 case 0x314: case 0x714: case 0xb14: case 0xf14:
2041 if (((insn
>> 22) & 3) == 0)
2043 wrd
= (insn
>> 12) & 0xf;
2044 rd0
= (insn
>> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2046 tmp
= tcg_temp_new_i32();
2047 switch ((insn
>> 22) & 3) {
2049 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2050 tcg_temp_free_i32(tmp
);
2053 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2056 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2057 tcg_temp_free_i32(tmp
);
2060 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2063 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2064 tcg_temp_free_i32(tmp
);
2067 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2070 tcg_temp_free_i32(tmp
);
2071 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2075 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2076 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2077 wrd
= (insn
>> 12) & 0xf;
2078 rd0
= (insn
>> 16) & 0xf;
2079 rd1
= (insn
>> 0) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2081 switch ((insn
>> 22) & 3) {
2083 if (insn
& (1 << 21))
2084 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2086 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2089 if (insn
& (1 << 21))
2090 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2092 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2095 if (insn
& (1 << 21))
2096 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2098 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2103 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2104 gen_op_iwmmxt_set_mup();
2106 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2107 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2108 wrd
= (insn
>> 12) & 0xf;
2109 rd0
= (insn
>> 16) & 0xf;
2110 rd1
= (insn
>> 0) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2112 switch ((insn
>> 22) & 3) {
2114 if (insn
& (1 << 21))
2115 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2117 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2120 if (insn
& (1 << 21))
2121 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2123 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2126 if (insn
& (1 << 21))
2127 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2129 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2134 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2135 gen_op_iwmmxt_set_mup();
2137 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2138 case 0x402: case 0x502: case 0x602: case 0x702:
2139 wrd
= (insn
>> 12) & 0xf;
2140 rd0
= (insn
>> 16) & 0xf;
2141 rd1
= (insn
>> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2143 tmp
= tcg_const_i32((insn
>> 20) & 3);
2144 iwmmxt_load_reg(cpu_V1
, rd1
);
2145 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2147 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2148 gen_op_iwmmxt_set_mup();
2150 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2151 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2152 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2153 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2154 wrd
= (insn
>> 12) & 0xf;
2155 rd0
= (insn
>> 16) & 0xf;
2156 rd1
= (insn
>> 0) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2158 switch ((insn
>> 20) & 0xf) {
2160 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2163 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2166 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2169 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2172 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2175 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2178 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2181 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2184 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2189 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2193 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2194 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2195 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2196 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2197 wrd
= (insn
>> 12) & 0xf;
2198 rd0
= (insn
>> 16) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2200 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2201 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2203 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2204 gen_op_iwmmxt_set_mup();
2205 gen_op_iwmmxt_set_cup();
2207 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2208 case 0x418: case 0x518: case 0x618: case 0x718:
2209 case 0x818: case 0x918: case 0xa18: case 0xb18:
2210 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2211 wrd
= (insn
>> 12) & 0xf;
2212 rd0
= (insn
>> 16) & 0xf;
2213 rd1
= (insn
>> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2215 switch ((insn
>> 20) & 0xf) {
2217 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2220 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2223 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2226 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2229 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2232 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2235 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2238 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2241 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2246 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2250 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2251 case 0x408: case 0x508: case 0x608: case 0x708:
2252 case 0x808: case 0x908: case 0xa08: case 0xb08:
2253 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2254 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2256 wrd
= (insn
>> 12) & 0xf;
2257 rd0
= (insn
>> 16) & 0xf;
2258 rd1
= (insn
>> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2260 switch ((insn
>> 22) & 3) {
2262 if (insn
& (1 << 21))
2263 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2265 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2268 if (insn
& (1 << 21))
2269 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2271 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2274 if (insn
& (1 << 21))
2275 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2277 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2280 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2284 case 0x201: case 0x203: case 0x205: case 0x207:
2285 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2286 case 0x211: case 0x213: case 0x215: case 0x217:
2287 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2288 wrd
= (insn
>> 5) & 0xf;
2289 rd0
= (insn
>> 12) & 0xf;
2290 rd1
= (insn
>> 0) & 0xf;
2291 if (rd0
== 0xf || rd1
== 0xf)
2293 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2294 tmp
= load_reg(s
, rd0
);
2295 tmp2
= load_reg(s
, rd1
);
2296 switch ((insn
>> 16) & 0xf) {
2297 case 0x0: /* TMIA */
2298 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2300 case 0x8: /* TMIAPH */
2301 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2303 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2304 if (insn
& (1 << 16))
2305 tcg_gen_shri_i32(tmp
, tmp
, 16);
2306 if (insn
& (1 << 17))
2307 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2308 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2311 tcg_temp_free_i32(tmp2
);
2312 tcg_temp_free_i32(tmp
);
2315 tcg_temp_free_i32(tmp2
);
2316 tcg_temp_free_i32(tmp
);
2317 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2318 gen_op_iwmmxt_set_mup();
2327 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2328 (ie. an undefined instruction). */
2329 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2331 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2334 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2335 /* Multiply with Internal Accumulate Format */
2336 rd0
= (insn
>> 12) & 0xf;
2338 acc
= (insn
>> 5) & 7;
2343 tmp
= load_reg(s
, rd0
);
2344 tmp2
= load_reg(s
, rd1
);
2345 switch ((insn
>> 16) & 0xf) {
2347 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2349 case 0x8: /* MIAPH */
2350 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2352 case 0xc: /* MIABB */
2353 case 0xd: /* MIABT */
2354 case 0xe: /* MIATB */
2355 case 0xf: /* MIATT */
2356 if (insn
& (1 << 16))
2357 tcg_gen_shri_i32(tmp
, tmp
, 16);
2358 if (insn
& (1 << 17))
2359 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2360 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2365 tcg_temp_free_i32(tmp2
);
2366 tcg_temp_free_i32(tmp
);
2368 gen_op_iwmmxt_movq_wRn_M0(acc
);
2372 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2373 /* Internal Accumulator Access Format */
2374 rdhi
= (insn
>> 16) & 0xf;
2375 rdlo
= (insn
>> 12) & 0xf;
2381 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2382 iwmmxt_load_reg(cpu_V0
, acc
);
2383 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2384 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2385 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2386 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2388 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2389 iwmmxt_store_reg(cpu_V0
, acc
);
2397 /* Disassemble system coprocessor instruction. Return nonzero if
2398 instruction is not defined. */
2399 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2402 uint32_t rd
= (insn
>> 12) & 0xf;
2403 uint32_t cp
= (insn
>> 8) & 0xf;
2408 if (insn
& ARM_CP_RW_BIT
) {
2409 if (!env
->cp
[cp
].cp_read
)
2411 gen_set_pc_im(s
->pc
);
2412 tmp
= tcg_temp_new_i32();
2413 tmp2
= tcg_const_i32(insn
);
2414 gen_helper_get_cp(tmp
, cpu_env
, tmp2
);
2415 tcg_temp_free(tmp2
);
2416 store_reg(s
, rd
, tmp
);
2418 if (!env
->cp
[cp
].cp_write
)
2420 gen_set_pc_im(s
->pc
);
2421 tmp
= load_reg(s
, rd
);
2422 tmp2
= tcg_const_i32(insn
);
2423 gen_helper_set_cp(cpu_env
, tmp2
, tmp
);
2424 tcg_temp_free(tmp2
);
2425 tcg_temp_free_i32(tmp
);
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
            || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
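/* Illustrative note (added, not from the original source): in the MRC/MCR
 * encoding bits [7:5] are opc2 and bits [23:21] are opc1, so the 'op' value
 * built above is (opc1 << 3) | opc2.  For example, MRC p15, 0, rX, c13, c0, 3
 * (the read-only user thread ID register) gives cpn == 13, cpm == 0 and
 * op == 3, which the first test above accepts for reads.
 */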
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2: tmp = load_cpu_field(cp15.c13_tls1); break;
        case 3: tmp = load_cpu_field(cp15.c13_tls2); break;
        case 4: tmp = load_cpu_field(cp15.c13_tls3); break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2: store_cpu_field(tmp, cp15.c13_tls1); break;
        case 3: store_cpu_field(tmp, cp15.c13_tls2); break;
        case 4: store_cpu_field(tmp, cp15.c13_tls3); break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
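/* Worked example (added for illustration, not part of the original source):
 * for the "D" operand, VFP_SREG_D(insn) builds a single-precision register
 * number from bits [15:12] as the high four bits and bit 22 as the low bit,
 * i.e. Sreg = (bits[15:12] << 1) | bit22.  VFP_DREG_D uses the opposite
 * weighting for doubles on VFP3, Dreg = bits[15:12] | (bit22 << 4), while
 * pre-VFP3 cores only have D0-D15 and treat a set bit 22 as UNDEFINED.
 */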
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
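/* Illustration (added, not in the original source): these helpers replicate
 * one element across a 32-bit lane.  With var = 0x000000AB,
 * gen_neon_dup_u8(var, 0) leaves 0xABABABAB; with var = 0x1234ABCD,
 * gen_neon_dup_low16 produces 0xABCDABCD and gen_neon_dup_high16
 * produces 0x12341234.
 */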
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
2676 if (insn
& (1 << 4)) {
2677 /* single register transfer */
2678 rd
= (insn
>> 12) & 0xf;
2683 VFP_DREG_N(rn
, insn
);
2686 if (insn
& 0x00c00060
2687 && !arm_feature(env
, ARM_FEATURE_NEON
))
2690 pass
= (insn
>> 21) & 1;
2691 if (insn
& (1 << 22)) {
2693 offset
= ((insn
>> 5) & 3) * 8;
2694 } else if (insn
& (1 << 5)) {
2696 offset
= (insn
& (1 << 6)) ? 16 : 0;
2701 if (insn
& ARM_CP_RW_BIT
) {
2703 tmp
= neon_load_reg(rn
, pass
);
2707 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2708 if (insn
& (1 << 23))
2714 if (insn
& (1 << 23)) {
2716 tcg_gen_shri_i32(tmp
, tmp
, 16);
2722 tcg_gen_sari_i32(tmp
, tmp
, 16);
2731 store_reg(s
, rd
, tmp
);
2734 tmp
= load_reg(s
, rd
);
2735 if (insn
& (1 << 23)) {
2738 gen_neon_dup_u8(tmp
, 0);
2739 } else if (size
== 1) {
2740 gen_neon_dup_low16(tmp
);
2742 for (n
= 0; n
<= pass
* 2; n
++) {
2743 tmp2
= tcg_temp_new_i32();
2744 tcg_gen_mov_i32(tmp2
, tmp
);
2745 neon_store_reg(rn
, n
, tmp2
);
2747 neon_store_reg(rn
, n
, tmp
);
2752 tmp2
= neon_load_reg(rn
, pass
);
2753 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2754 tcg_temp_free_i32(tmp2
);
2757 tmp2
= neon_load_reg(rn
, pass
);
2758 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2759 tcg_temp_free_i32(tmp2
);
2764 neon_store_reg(rn
, pass
, tmp
);
2768 if ((insn
& 0x6f) != 0x00)
2770 rn
= VFP_SREG_N(insn
);
2771 if (insn
& ARM_CP_RW_BIT
) {
2773 if (insn
& (1 << 21)) {
2774 /* system register */
2779 /* VFP2 allows access to FSID from userspace.
2780 VFP3 restricts all id registers to privileged
2783 && arm_feature(env
, ARM_FEATURE_VFP3
))
2785 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2790 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2792 case ARM_VFP_FPINST
:
2793 case ARM_VFP_FPINST2
:
2794 /* Not present in VFP3. */
2796 || arm_feature(env
, ARM_FEATURE_VFP3
))
2798 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2802 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2803 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2805 tmp
= tcg_temp_new_i32();
2806 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2812 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2814 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2820 gen_mov_F0_vreg(0, rn
);
2821 tmp
= gen_vfp_mrs();
2824 /* Set the 4 flag bits in the CPSR. */
2826 tcg_temp_free_i32(tmp
);
2828 store_reg(s
, rd
, tmp
);
2832 tmp
= load_reg(s
, rd
);
2833 if (insn
& (1 << 21)) {
2835 /* system register */
2840 /* Writes are ignored. */
2843 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2844 tcg_temp_free_i32(tmp
);
2850 /* TODO: VFP subarchitecture support.
2851 * For now, keep the EN bit only */
2852 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2853 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2856 case ARM_VFP_FPINST
:
2857 case ARM_VFP_FPINST2
:
2858 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2865 gen_mov_vreg_F0(0, rn
);
2870 /* data processing */
2871 /* The opcode is in bits 23, 21, 20 and 6. */
2872 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2876 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2878 /* rn is register number */
2879 VFP_DREG_N(rn
, insn
);
2882 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2883 /* Integer or single precision destination. */
2884 rd
= VFP_SREG_D(insn
);
2886 VFP_DREG_D(rd
, insn
);
2889 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2890 /* VCVT from int is always from S reg regardless of dp bit.
2891 * VCVT with immediate frac_bits has same format as SREG_M
2893 rm
= VFP_SREG_M(insn
);
2895 VFP_DREG_M(rm
, insn
);
2898 rn
= VFP_SREG_N(insn
);
2899 if (op
== 15 && rn
== 15) {
2900 /* Double precision destination. */
2901 VFP_DREG_D(rd
, insn
);
2903 rd
= VFP_SREG_D(insn
);
2905 /* NB that we implicitly rely on the encoding for the frac_bits
2906 * in VCVT of fixed to float being the same as that of an SREG_M
2908 rm
= VFP_SREG_M(insn
);
2911 veclen
= s
->vec_len
;
2912 if (op
== 15 && rn
> 3)
2915 /* Shut up compiler warnings. */
2926 /* Figure out what type of vector operation this is. */
2927 if ((rd
& bank_mask
) == 0) {
2932 delta_d
= (s
->vec_stride
>> 1) + 1;
2934 delta_d
= s
->vec_stride
+ 1;
2936 if ((rm
& bank_mask
) == 0) {
2937 /* mixed scalar/vector */
2946 /* Load the initial operands. */
2951 /* Integer source */
2952 gen_mov_F0_vreg(0, rm
);
2957 gen_mov_F0_vreg(dp
, rd
);
2958 gen_mov_F1_vreg(dp
, rm
);
2962 /* Compare with zero */
2963 gen_mov_F0_vreg(dp
, rd
);
2974 /* Source and destination the same. */
2975 gen_mov_F0_vreg(dp
, rd
);
2978 /* One source operand. */
2979 gen_mov_F0_vreg(dp
, rm
);
2983 /* Two source operands. */
2984 gen_mov_F0_vreg(dp
, rn
);
2985 gen_mov_F1_vreg(dp
, rm
);
2989 /* Perform the calculation. */
2991 case 0: /* mac: fd + (fn * fm) */
2993 gen_mov_F1_vreg(dp
, rd
);
2996 case 1: /* nmac: fd - (fn * fm) */
2999 gen_mov_F1_vreg(dp
, rd
);
3002 case 2: /* msc: -fd + (fn * fm) */
3004 gen_mov_F1_vreg(dp
, rd
);
3007 case 3: /* nmsc: -fd - (fn * fm) */
3010 gen_mov_F1_vreg(dp
, rd
);
3013 case 4: /* mul: fn * fm */
3016 case 5: /* nmul: -(fn * fm) */
3020 case 6: /* add: fn + fm */
3023 case 7: /* sub: fn - fm */
3026 case 8: /* div: fn / fm */
3029 case 14: /* fconst */
3030 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3033 n
= (insn
<< 12) & 0x80000000;
3034 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3041 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3048 tcg_gen_movi_i32(cpu_F0s
, n
);
3051 case 15: /* extension space */
3065 case 4: /* vcvtb.f32.f16 */
3066 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3068 tmp
= gen_vfp_mrs();
3069 tcg_gen_ext16u_i32(tmp
, tmp
);
3070 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3071 tcg_temp_free_i32(tmp
);
3073 case 5: /* vcvtt.f32.f16 */
3074 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3076 tmp
= gen_vfp_mrs();
3077 tcg_gen_shri_i32(tmp
, tmp
, 16);
3078 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3079 tcg_temp_free_i32(tmp
);
3081 case 6: /* vcvtb.f16.f32 */
3082 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3084 tmp
= tcg_temp_new_i32();
3085 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3086 gen_mov_F0_vreg(0, rd
);
3087 tmp2
= gen_vfp_mrs();
3088 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3089 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3090 tcg_temp_free_i32(tmp2
);
3093 case 7: /* vcvtt.f16.f32 */
3094 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3096 tmp
= tcg_temp_new_i32();
3097 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3098 tcg_gen_shli_i32(tmp
, tmp
, 16);
3099 gen_mov_F0_vreg(0, rd
);
3100 tmp2
= gen_vfp_mrs();
3101 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3102 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3103 tcg_temp_free_i32(tmp2
);
3115 case 11: /* cmpez */
3119 case 15: /* single<->double conversion */
3121 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3123 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3125 case 16: /* fuito */
3128 case 17: /* fsito */
3131 case 20: /* fshto */
3132 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3134 gen_vfp_shto(dp
, 16 - rm
);
3136 case 21: /* fslto */
3137 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3139 gen_vfp_slto(dp
, 32 - rm
);
3141 case 22: /* fuhto */
3142 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3144 gen_vfp_uhto(dp
, 16 - rm
);
3146 case 23: /* fulto */
3147 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3149 gen_vfp_ulto(dp
, 32 - rm
);
3151 case 24: /* ftoui */
3154 case 25: /* ftouiz */
3157 case 26: /* ftosi */
3160 case 27: /* ftosiz */
3163 case 28: /* ftosh */
3164 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3166 gen_vfp_tosh(dp
, 16 - rm
);
3168 case 29: /* ftosl */
3169 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3171 gen_vfp_tosl(dp
, 32 - rm
);
3173 case 30: /* ftouh */
3174 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3176 gen_vfp_touh(dp
, 16 - rm
);
3178 case 31: /* ftoul */
3179 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3181 gen_vfp_toul(dp
, 32 - rm
);
3183 default: /* undefined */
3184 printf ("rn:%d\n", rn
);
3188 default: /* undefined */
3189 printf ("op:%d\n", op
);
3193 /* Write back the result. */
3194 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3195 ; /* Comparison, do nothing. */
3196 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3197 /* VCVT double to int: always integer result. */
3198 gen_mov_vreg_F0(0, rd
);
3199 else if (op
== 15 && rn
== 15)
3201 gen_mov_vreg_F0(!dp
, rd
);
3203 gen_mov_vreg_F0(dp
, rd
);
3205 /* break out of the loop if we have finished */
3209 if (op
== 15 && delta_m
== 0) {
3210 /* single source one-many */
3212 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3214 gen_mov_vreg_F0(dp
, rd
);
3218 /* Setup the next operands. */
3220 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3224 /* One source operand. */
3225 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3227 gen_mov_F0_vreg(dp
, rm
);
3229 /* Two source operands. */
3230 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3232 gen_mov_F0_vreg(dp
, rn
);
3234 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3236 gen_mov_F1_vreg(dp
, rm
);
3244 if ((insn
& 0x03e00000) == 0x00400000) {
3245 /* two-register transfer */
3246 rn
= (insn
>> 16) & 0xf;
3247 rd
= (insn
>> 12) & 0xf;
3249 VFP_DREG_M(rm
, insn
);
3251 rm
= VFP_SREG_M(insn
);
3254 if (insn
& ARM_CP_RW_BIT
) {
3257 gen_mov_F0_vreg(0, rm
* 2);
3258 tmp
= gen_vfp_mrs();
3259 store_reg(s
, rd
, tmp
);
3260 gen_mov_F0_vreg(0, rm
* 2 + 1);
3261 tmp
= gen_vfp_mrs();
3262 store_reg(s
, rn
, tmp
);
3264 gen_mov_F0_vreg(0, rm
);
3265 tmp
= gen_vfp_mrs();
3266 store_reg(s
, rd
, tmp
);
3267 gen_mov_F0_vreg(0, rm
+ 1);
3268 tmp
= gen_vfp_mrs();
3269 store_reg(s
, rn
, tmp
);
3274 tmp
= load_reg(s
, rd
);
3276 gen_mov_vreg_F0(0, rm
* 2);
3277 tmp
= load_reg(s
, rn
);
3279 gen_mov_vreg_F0(0, rm
* 2 + 1);
3281 tmp
= load_reg(s
, rd
);
3283 gen_mov_vreg_F0(0, rm
);
3284 tmp
= load_reg(s
, rn
);
3286 gen_mov_vreg_F0(0, rm
+ 1);
3291 rn
= (insn
>> 16) & 0xf;
3293 VFP_DREG_D(rd
, insn
);
3295 rd
= VFP_SREG_D(insn
);
3296 if (s
->thumb
&& rn
== 15) {
3297 addr
= tcg_temp_new_i32();
3298 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3300 addr
= load_reg(s
, rn
);
3302 if ((insn
& 0x01200000) == 0x01000000) {
3303 /* Single load/store */
3304 offset
= (insn
& 0xff) << 2;
3305 if ((insn
& (1 << 23)) == 0)
3307 tcg_gen_addi_i32(addr
, addr
, offset
);
3308 if (insn
& (1 << 20)) {
3309 gen_vfp_ld(s
, dp
, addr
);
3310 gen_mov_vreg_F0(dp
, rd
);
3312 gen_mov_F0_vreg(dp
, rd
);
3313 gen_vfp_st(s
, dp
, addr
);
3315 tcg_temp_free_i32(addr
);
3317 /* load/store multiple */
3319 n
= (insn
>> 1) & 0x7f;
3323 if (insn
& (1 << 24)) /* pre-decrement */
3324 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3330 for (i
= 0; i
< n
; i
++) {
3331 if (insn
& ARM_CP_RW_BIT
) {
3333 gen_vfp_ld(s
, dp
, addr
);
3334 gen_mov_vreg_F0(dp
, rd
+ i
);
3337 gen_mov_F0_vreg(dp
, rd
+ i
);
3338 gen_vfp_st(s
, dp
, addr
);
3340 tcg_gen_addi_i32(addr
, addr
, offset
);
3342 if (insn
& (1 << 21)) {
3344 if (insn
& (1 << 24))
3345 offset
= -offset
* n
;
3346 else if (dp
&& (insn
& 1))
3352 tcg_gen_addi_i32(addr
, addr
, offset
);
3353 store_reg(s
, rn
, addr
);
3355 tcg_temp_free_i32(addr
);
3361 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
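/* Note added for illustration (not in the original source): gen_goto_tb only
 * emits a patchable direct branch (tcg_gen_goto_tb followed by
 * tcg_gen_exit_tb((long)tb + n), which encodes the TB pointer plus the exit
 * slot to chain) when the destination lies in the same guest page as the
 * current TB, so that per-page invalidation keeps the chained link safe.
 * Cross-page jumps just set the PC and return to the main loop with
 * tcg_gen_exit_tb(0).
 */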
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
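/* Illustration (added, not part of the original source): the four flag bits
 * come from the MSR field mask <c,x,s,f>, one byte of the PSR each, so for
 * example MSR CPSR_fc, rX has flags == 0b1001 and yields a raw mask of
 * 0xff0000ff before the feature- and mode-dependent bits are stripped above.
 */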
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)
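/* Usage note added for illustration (not in the original source): inside
 * disas_neon_data_insn, where 'size', 'u', 'tmp' and 'tmp2' are in scope,
 * GEN_NEON_INTEGER_OP(hadd) expands to a dispatch on (size << 1) | u that
 * calls gen_helper_neon_hadd_s8/u8/s16/u16/s32/u32(tmp, tmp, tmp2), so one
 * macro covers all six signed/unsigned element widths of an op like VHADD.
 */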
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qunzip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_qunzip16(cpu_env, tmp, tmp2); break;
        case 2: gen_helper_neon_qunzip32(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_unzip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_unzip16(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qzip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_qzip16(cpu_env, tmp, tmp2); break;
        case 2: gen_helper_neon_qzip32(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_zip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_zip16(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
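/* Illustration (added, not in the original source): gen_neon_trn_u16
 * computes t0' = (t0 << 16) | (t1 & 0xffff) and
 * t1' = (t1 >> 16) | (t0 & 0xffff0000).  With t0 = 0xAAAABBBB and
 * t1 = 0xCCCCDDDD this gives t0' = 0xBBBBDDDD and t1' = 0xAAAACCCC; the u8
 * variant performs the same exchange on byte lanes.  These are the per-word
 * lane shuffles that the VTRN handling in disas_neon_data_insn builds on.
 */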
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op, nregs, interleave, spacing, stride, size, reg, pass, load, shift, n;
    TCGv addr, tmp, tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    addr = tcg_temp_new_i32();
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
= (insn
>> 8) & 0xf;
3795 size
= (insn
>> 6) & 3;
3798 nregs
= neon_ls_element_type
[op
].nregs
;
3799 interleave
= neon_ls_element_type
[op
].interleave
;
3800 spacing
= neon_ls_element_type
[op
].spacing
;
3801 if (size
== 3 && (interleave
| spacing
) != 1)
3803 load_reg_var(s
, addr
, rn
);
3804 stride
= (1 << size
) * interleave
;
3805 for (reg
= 0; reg
< nregs
; reg
++) {
3806 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3807 load_reg_var(s
, addr
, rn
);
3808 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3809 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3810 load_reg_var(s
, addr
, rn
);
3811 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3815 tmp64
= gen_ld64(addr
, IS_USER(s
));
3816 neon_store_reg64(tmp64
, rd
);
3817 tcg_temp_free_i64(tmp64
);
3819 tmp64
= tcg_temp_new_i64();
3820 neon_load_reg64(tmp64
, rd
);
3821 gen_st64(tmp64
, addr
, IS_USER(s
));
3823 tcg_gen_addi_i32(addr
, addr
, stride
);
3825 for (pass
= 0; pass
< 2; pass
++) {
3828 tmp
= gen_ld32(addr
, IS_USER(s
));
3829 neon_store_reg(rd
, pass
, tmp
);
3831 tmp
= neon_load_reg(rd
, pass
);
3832 gen_st32(tmp
, addr
, IS_USER(s
));
3834 tcg_gen_addi_i32(addr
, addr
, stride
);
3835 } else if (size
== 1) {
3837 tmp
= gen_ld16u(addr
, IS_USER(s
));
3838 tcg_gen_addi_i32(addr
, addr
, stride
);
3839 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3840 tcg_gen_addi_i32(addr
, addr
, stride
);
3841 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3842 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3843 tcg_temp_free_i32(tmp2
);
3844 neon_store_reg(rd
, pass
, tmp
);
3846 tmp
= neon_load_reg(rd
, pass
);
3847 tmp2
= tcg_temp_new_i32();
3848 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3849 gen_st16(tmp
, addr
, IS_USER(s
));
3850 tcg_gen_addi_i32(addr
, addr
, stride
);
3851 gen_st16(tmp2
, addr
, IS_USER(s
));
3852 tcg_gen_addi_i32(addr
, addr
, stride
);
3854 } else /* size == 0 */ {
3857 for (n
= 0; n
< 4; n
++) {
3858 tmp
= gen_ld8u(addr
, IS_USER(s
));
3859 tcg_gen_addi_i32(addr
, addr
, stride
);
3863 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3864 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3865 tcg_temp_free_i32(tmp
);
3868 neon_store_reg(rd
, pass
, tmp2
);
3870 tmp2
= neon_load_reg(rd
, pass
);
3871 for (n
= 0; n
< 4; n
++) {
3872 tmp
= tcg_temp_new_i32();
3874 tcg_gen_mov_i32(tmp
, tmp2
);
3876 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3878 gen_st8(tmp
, addr
, IS_USER(s
));
3879 tcg_gen_addi_i32(addr
, addr
, stride
);
3881 tcg_temp_free_i32(tmp2
);
3890 size
= (insn
>> 10) & 3;
3892 /* Load single element to all lanes. */
3895 size
= (insn
>> 6) & 3;
3896 nregs
= ((insn
>> 8) & 3) + 1;
3897 stride
= (insn
& (1 << 5)) ? 2 : 1;
3898 load_reg_var(s
, addr
, rn
);
3899 for (reg
= 0; reg
< nregs
; reg
++) {
3902 tmp
= gen_ld8u(addr
, IS_USER(s
));
3903 gen_neon_dup_u8(tmp
, 0);
3906 tmp
= gen_ld16u(addr
, IS_USER(s
));
3907 gen_neon_dup_low16(tmp
);
3910 tmp
= gen_ld32(addr
, IS_USER(s
));
3914 default: /* Avoid compiler warnings. */
3917 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3918 tmp2
= tcg_temp_new_i32();
3919 tcg_gen_mov_i32(tmp2
, tmp
);
3920 neon_store_reg(rd
, 0, tmp2
);
3921 neon_store_reg(rd
, 1, tmp
);
3924 stride
= (1 << size
) * nregs
;
3926 /* Single element. */
3927 pass
= (insn
>> 7) & 1;
3930 shift
= ((insn
>> 5) & 3) * 8;
3934 shift
= ((insn
>> 6) & 1) * 16;
3935 stride
= (insn
& (1 << 5)) ? 2 : 1;
3939 stride
= (insn
& (1 << 6)) ? 2 : 1;
3944 nregs
= ((insn
>> 8) & 3) + 1;
3945 load_reg_var(s
, addr
, rn
);
3946 for (reg
= 0; reg
< nregs
; reg
++) {
3950 tmp
= gen_ld8u(addr
, IS_USER(s
));
3953 tmp
= gen_ld16u(addr
, IS_USER(s
));
3956 tmp
= gen_ld32(addr
, IS_USER(s
));
3958 default: /* Avoid compiler warnings. */
3962 tmp2
= neon_load_reg(rd
, pass
);
3963 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3964 tcg_temp_free_i32(tmp2
);
3966 neon_store_reg(rd
, pass
, tmp
);
3967 } else { /* Store */
3968 tmp
= neon_load_reg(rd
, pass
);
3970 tcg_gen_shri_i32(tmp
, tmp
, shift
);
3973 gen_st8(tmp
, addr
, IS_USER(s
));
3976 gen_st16(tmp
, addr
, IS_USER(s
));
3979 gen_st32(tmp
, addr
, IS_USER(s
));
3984 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3986 stride
= nregs
* (1 << size
);
3989 tcg_temp_free_i32(addr
);
3993 base
= load_reg(s
, rn
);
3995 tcg_gen_addi_i32(base
, base
, stride
);
3998 index
= load_reg(s
, rm
);
3999 tcg_gen_add_i32(base
, base
, index
);
4000 tcg_temp_free_i32(index
);
4002 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
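/* Illustration (added, not in the original source): the select works bit by
 * bit, dest = (t & c) | (f & ~c), so with c = 0xffff0000, t = 0x11112222 and
 * f = 0x33334444 the result is 0x11114444.  VBSL, VBIT and VBIF all map onto
 * this helper, with the caller shuffling which operand plays which role.
 */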
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
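/* Dispatch summary added for illustration (not in the original source):
 * reading straight from the code above, op != 0 selects either the
 * saturate-signed-to-unsigned narrow (u set) or a plain truncating narrow
 * (u clear), while op == 0 selects the unsigned (u set) or signed (u clear)
 * saturating narrow.  Callers encode which narrowing instruction they are
 * translating in these two flags.
 */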
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int op, q, rd, rn, rm;
    int size, shift, pass, count, pairwise, u, n;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
4226 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4227 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4228 || op
== 10 || op
== 11 || op
== 16)) {
4229 /* 64-bit element instructions. */
4230 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4231 neon_load_reg64(cpu_V0
, rn
+ pass
);
4232 neon_load_reg64(cpu_V1
, rm
+ pass
);
4236 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4239 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4245 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4248 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4254 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4256 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4261 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4264 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4268 case 10: /* VRSHL */
4270 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4272 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4275 case 11: /* VQRSHL */
4277 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4280 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4286 tcg_gen_sub_i64(CPU_V001
);
4288 tcg_gen_add_i64(CPU_V001
);
4294 neon_store_reg64(cpu_V0
, rd
+ pass
);
4301 case 10: /* VRSHL */
4302 case 11: /* VQRSHL */
4305 /* Shift instruction operands are reversed. */
4312 case 20: /* VPMAX */
4313 case 21: /* VPMIN */
4314 case 23: /* VPADD */
4317 case 26: /* VPADD (float) */
4318 pairwise
= (u
&& size
< 2);
4320 case 30: /* VPMIN/VPMAX (float) */
4328 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4337 tmp
= neon_load_reg(rn
, n
);
4338 tmp2
= neon_load_reg(rn
, n
+ 1);
4340 tmp
= neon_load_reg(rm
, n
);
4341 tmp2
= neon_load_reg(rm
, n
+ 1);
4345 tmp
= neon_load_reg(rn
, pass
);
4346 tmp2
= neon_load_reg(rm
, pass
);
4350 GEN_NEON_INTEGER_OP(hadd
);
4353 GEN_NEON_INTEGER_OP_ENV(qadd
);
4355 case 2: /* VRHADD */
4356 GEN_NEON_INTEGER_OP(rhadd
);
4358 case 3: /* Logic ops. */
4359 switch ((u
<< 2) | size
) {
4361 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4364 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4367 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4370 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4373 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4376 tmp3
= neon_load_reg(rd
, pass
);
4377 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4378 tcg_temp_free_i32(tmp3
);
4381 tmp3
= neon_load_reg(rd
, pass
);
4382 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4383 tcg_temp_free_i32(tmp3
);
4386 tmp3
= neon_load_reg(rd
, pass
);
4387 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4388 tcg_temp_free_i32(tmp3
);
4393 GEN_NEON_INTEGER_OP(hsub
);
4396 GEN_NEON_INTEGER_OP_ENV(qsub
);
4399 GEN_NEON_INTEGER_OP(cgt
);
4402 GEN_NEON_INTEGER_OP(cge
);
4405 GEN_NEON_INTEGER_OP(shl
);
4408 GEN_NEON_INTEGER_OP_ENV(qshl
);
4410 case 10: /* VRSHL */
4411 GEN_NEON_INTEGER_OP(rshl
);
4413 case 11: /* VQRSHL */
4414 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4417 GEN_NEON_INTEGER_OP(max
);
4420 GEN_NEON_INTEGER_OP(min
);
4423 GEN_NEON_INTEGER_OP(abd
);
4426 GEN_NEON_INTEGER_OP(abd
);
4427 tcg_temp_free_i32(tmp2
);
4428 tmp2
= neon_load_reg(rd
, pass
);
4429 gen_neon_add(size
, tmp
, tmp2
);
4432 if (!u
) { /* VADD */
4433 if (gen_neon_add(size
, tmp
, tmp2
))
4437 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4438 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4439 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4445 if (!u
) { /* VTST */
4447 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4448 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4449 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4454 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4455 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4456 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4461 case 18: /* Multiply. */
4463 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4464 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4465 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4468 tcg_temp_free_i32(tmp2
);
4469 tmp2
= neon_load_reg(rd
, pass
);
4471 gen_neon_rsb(size
, tmp
, tmp2
);
4473 gen_neon_add(size
, tmp
, tmp2
);
4477 if (u
) { /* polynomial */
4478 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4479 } else { /* Integer */
4481 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4482 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4483 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4488 case 20: /* VPMAX */
4489 GEN_NEON_INTEGER_OP(pmax
);
4491 case 21: /* VPMIN */
4492 GEN_NEON_INTEGER_OP(pmin
);
                case 22: /* Multiply high.  */
                    if (!u) { /* VQDMULH */
4497 case 1: gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4498 case 2: gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
                    } else { /* VQRDMULH */
4503 case 1: gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4504 case 2: gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
4509 case 23: /* VPADD */
4513 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4514 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4515 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
                case 26: /* Floating point arithmetic.  */
4520 switch ((u
<< 2) | size
) {
4522 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4525 gen_helper_neon_sub_f32(tmp
, tmp
, tmp2
);
4528 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4531 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
);
4537 case 27: /* Float multiply. */
4538 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
4540 tcg_temp_free_i32(tmp2
);
4541 tmp2
= neon_load_reg(rd
, pass
);
4543 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4545 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
4549 case 28: /* Float compare. */
4551 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
4554 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
4556 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
4559 case 29: /* Float compare absolute. */
4563 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
);
4565 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
);
4567 case 30: /* Float min/max. */
4569 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
);
4571 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
);
4575 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4577 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4582 tcg_temp_free_i32(tmp2
);
4584 /* Save the result. For elementwise operations we can put it
4585 straight into the destination register. For pairwise operations
4586 we have to be careful to avoid clobbering the source operands. */
4587 if (pairwise
&& rd
== rm
) {
4588 neon_store_scratch(pass
, tmp
);
4590 neon_store_reg(rd
, pass
, tmp
);
4594 if (pairwise
&& rd
== rm
) {
4595 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4596 tmp
= neon_load_scratch(pass
);
4597 neon_store_reg(rd
, pass
, tmp
);
4600 /* End of 3 register same size operations. */
4601 } else if (insn
& (1 << 4)) {
4602 if ((insn
& 0x00380080) != 0) {
4603 /* Two registers and shift. */
4604 op
= (insn
>> 8) & 0xf;
4605 if (insn
& (1 << 7)) {
4610 while ((insn
& (1 << (size
+ 19))) == 0)
4613 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
4617 /* Shift by immediate:
4618 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4619 /* Right shifts are encoded as N - shift, where N is the
4620 element size in bits. */
4622 shift
= shift
- (1 << (size
+ 3));
4630 imm
= (uint8_t) shift
;
4635 imm
= (uint16_t) shift
;
4646 for (pass
= 0; pass
< count
; pass
++) {
4648 neon_load_reg64(cpu_V0
, rm
+ pass
);
4649 tcg_gen_movi_i64(cpu_V1
, imm
);
4654 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4656 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4661 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4663 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4668 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4670 case 5: /* VSHL, VSLI */
4671 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4673 case 6: /* VQSHLU */
4675 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
4683 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4686 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4691 if (op
== 1 || op
== 3) {
4693 neon_load_reg64(cpu_V1
, rd
+ pass
);
4694 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4695 } else if (op
== 4 || (op
== 5 && u
)) {
4697 neon_load_reg64(cpu_V1
, rd
+ pass
);
4699 if (shift
< -63 || shift
> 63) {
4703 mask
= 0xffffffffffffffffull
>> -shift
;
4705 mask
= 0xffffffffffffffffull
<< shift
;
4708 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
4709 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4711 neon_store_reg64(cpu_V0
, rd
+ pass
);
4712 } else { /* size < 3 */
4713 /* Operands in T0 and T1. */
4714 tmp
= neon_load_reg(rm
, pass
);
4715 tmp2
= tcg_temp_new_i32();
4716 tcg_gen_movi_i32(tmp2
, imm
);
4720 GEN_NEON_INTEGER_OP(shl
);
4724 GEN_NEON_INTEGER_OP(rshl
);
4729 GEN_NEON_INTEGER_OP(shl
);
4731 case 5: /* VSHL, VSLI */
4733 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
4734 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
4735 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
4739 case 6: /* VQSHLU */
4745 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
4749 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
4753 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
4761 GEN_NEON_INTEGER_OP_ENV(qshl
);
4764 tcg_temp_free_i32(tmp2
);
4766 if (op
== 1 || op
== 3) {
4768 tmp2
= neon_load_reg(rd
, pass
);
4769 gen_neon_add(size
, tmp
, tmp2
);
4770 tcg_temp_free_i32(tmp2
);
4771 } else if (op
== 4 || (op
== 5 && u
)) {
4776 mask
= 0xff >> -shift
;
4778 mask
= (uint8_t)(0xff << shift
);
4784 mask
= 0xffff >> -shift
;
4786 mask
= (uint16_t)(0xffff << shift
);
4790 if (shift
< -31 || shift
> 31) {
4794 mask
= 0xffffffffu
>> -shift
;
4796 mask
= 0xffffffffu
<< shift
;
4802 tmp2
= neon_load_reg(rd
, pass
);
4803 tcg_gen_andi_i32(tmp
, tmp
, mask
);
4804 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
4805 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4806 tcg_temp_free_i32(tmp2
);
4808 neon_store_reg(rd
, pass
, tmp
);
4811 } else if (op
< 10) {
4812 /* Shift by immediate and narrow:
4813 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4814 int input_unsigned
= (op
== 8) ? !u
: u
;
4816 shift
= shift
- (1 << (size
+ 3));
4819 tmp64
= tcg_const_i64(shift
);
4820 neon_load_reg64(cpu_V0
, rm
);
4821 neon_load_reg64(cpu_V1
, rm
+ 1);
4822 for (pass
= 0; pass
< 2; pass
++) {
4830 if (input_unsigned
) {
4831 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
4833 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
4836 if (input_unsigned
) {
4837 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
4839 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
4842 tmp
= tcg_temp_new_i32();
4843 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
4844 neon_store_reg(rd
, pass
, tmp
);
4846 tcg_temp_free_i64(tmp64
);
4849 imm
= (uint16_t)shift
;
4853 imm
= (uint32_t)shift
;
4855 tmp2
= tcg_const_i32(imm
);
4856 tmp4
= neon_load_reg(rm
+ 1, 0);
4857 tmp5
= neon_load_reg(rm
+ 1, 1);
4858 for (pass
= 0; pass
< 2; pass
++) {
4860 tmp
= neon_load_reg(rm
, 0);
4864 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
4867 tmp3
= neon_load_reg(rm
, 1);
4871 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
4873 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4874 tcg_temp_free_i32(tmp
);
4875 tcg_temp_free_i32(tmp3
);
4876 tmp
= tcg_temp_new_i32();
4877 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
4878 neon_store_reg(rd
, pass
, tmp
);
4880 tcg_temp_free_i32(tmp2
);
4882 } else if (op
== 10) {
4886 tmp
= neon_load_reg(rm
, 0);
4887 tmp2
= neon_load_reg(rm
, 1);
4888 for (pass
= 0; pass
< 2; pass
++) {
4892 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4895 /* The shift is less than the width of the source
4896 type, so we can just shift the whole register. */
4897 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbouring narrow
                         * input.  */
4903 if (size
< 2 || !u
) {
4906 imm
= (0xffu
>> (8 - shift
));
4908 } else if (size
== 1) {
4909 imm
= 0xffff >> (16 - shift
);
4912 imm
= 0xffffffff >> (32 - shift
);
4915 imm64
= imm
| (((uint64_t)imm
) << 32);
4919 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
4922 neon_store_reg64(cpu_V0
, rd
+ pass
);
4924 } else if (op
>= 14) {
4925 /* VCVT fixed-point. */
4926 /* We have already masked out the must-be-1 top bit of imm6,
4927 * hence this 32-shift where the ARM ARM has 64-imm6.
4930 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4931 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4934 gen_vfp_ulto(0, shift
);
4936 gen_vfp_slto(0, shift
);
4939 gen_vfp_toul(0, shift
);
4941 gen_vfp_tosl(0, shift
);
4943 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4948 } else { /* (insn & 0x00380080) == 0 */
4951 op
= (insn
>> 8) & 0xf;
4952 /* One register and immediate. */
4953 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4954 invert
= (insn
& (1 << 5)) != 0;
4972 imm
= (imm
<< 8) | (imm
<< 24);
4975 imm
= (imm
<< 8) | 0xff;
4978 imm
= (imm
<< 16) | 0xffff;
4981 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
4986 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
4987 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
4993 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4994 if (op
& 1 && op
< 12) {
4995 tmp
= neon_load_reg(rd
, pass
);
4997 /* The immediate value has already been inverted, so
4999 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5001 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5005 tmp
= tcg_temp_new_i32();
5006 if (op
== 14 && invert
) {
5009 for (n
= 0; n
< 4; n
++) {
5010 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5011 val
|= 0xff << (n
* 8);
5013 tcg_gen_movi_i32(tmp
, val
);
5015 tcg_gen_movi_i32(tmp
, imm
);
5018 neon_store_reg(rd
, pass
, tmp
);
5021 } else { /* (insn & 0x00800010 == 0x00800000) */
5023 op
= (insn
>> 8) & 0xf;
5024 if ((insn
& (1 << 6)) == 0) {
5025 /* Three registers of different lengths. */
5029 /* prewiden, src1_wide, src2_wide */
5030 static const int neon_3reg_wide
[16][3] = {
5031 {1, 0, 0}, /* VADDL */
5032 {1, 1, 0}, /* VADDW */
5033 {1, 0, 0}, /* VSUBL */
5034 {1, 1, 0}, /* VSUBW */
5035 {0, 1, 1}, /* VADDHN */
5036 {0, 0, 0}, /* VABAL */
5037 {0, 1, 1}, /* VSUBHN */
5038 {0, 0, 0}, /* VABDL */
5039 {0, 0, 0}, /* VMLAL */
5040 {0, 0, 0}, /* VQDMLAL */
5041 {0, 0, 0}, /* VMLSL */
5042 {0, 0, 0}, /* VQDMLSL */
5043 {0, 0, 0}, /* Integer VMULL */
5044 {0, 0, 0}, /* VQDMULL */
5045 {0, 0, 0} /* Polynomial VMULL */
5048 prewiden
= neon_3reg_wide
[op
][0];
5049 src1_wide
= neon_3reg_wide
[op
][1];
5050 src2_wide
= neon_3reg_wide
[op
][2];
5052 if (size
== 0 && (op
== 9 || op
== 11 || op
== 13))
5055 /* Avoid overlapping operands. Wide source operands are
5056 always aligned so will never overlap with wide
5057 destinations in problematic ways. */
5058 if (rd
== rm
&& !src2_wide
) {
5059 tmp
= neon_load_reg(rm
, 1);
5060 neon_store_scratch(2, tmp
);
5061 } else if (rd
== rn
&& !src1_wide
) {
5062 tmp
= neon_load_reg(rn
, 1);
5063 neon_store_scratch(2, tmp
);
5066 for (pass
= 0; pass
< 2; pass
++) {
5068 neon_load_reg64(cpu_V0
, rn
+ pass
);
5071 if (pass
== 1 && rd
== rn
) {
5072 tmp
= neon_load_scratch(2);
5074 tmp
= neon_load_reg(rn
, pass
);
5077 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5081 neon_load_reg64(cpu_V1
, rm
+ pass
);
5084 if (pass
== 1 && rd
== rm
) {
5085 tmp2
= neon_load_scratch(2);
5087 tmp2
= neon_load_reg(rm
, pass
);
5090 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5094 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5095 gen_neon_addl(size
);
5097 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5098 gen_neon_subl(size
);
5100 case 5: case 7: /* VABAL, VABDL */
5101 switch ((size
<< 1) | u
) {
5103 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5106 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5109 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5112 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5115 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5118 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5122 tcg_temp_free_i32(tmp2
);
5123 tcg_temp_free_i32(tmp
);
5125 case 8: case 9: case 10: case 11: case 12: case 13:
5126 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5127 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5129 case 14: /* Polynomial VMULL */
5130 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5131 tcg_temp_free_i32(tmp2
);
5132 tcg_temp_free_i32(tmp
);
5134 default: /* 15 is RESERVED. */
5139 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5140 neon_store_reg64(cpu_V0
, rd
+ pass
);
5141 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5143 neon_load_reg64(cpu_V1
, rd
+ pass
);
5145 case 10: /* VMLSL */
5146 gen_neon_negl(cpu_V0
, size
);
5148 case 5: case 8: /* VABAL, VMLAL */
5149 gen_neon_addl(size
);
5151 case 9: case 11: /* VQDMLAL, VQDMLSL */
5152 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5154 gen_neon_negl(cpu_V0
, size
);
5156 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5161 neon_store_reg64(cpu_V0
, rd
+ pass
);
5162 } else if (op
== 4 || op
== 6) {
5163 /* Narrowing operation. */
5164 tmp
= tcg_temp_new_i32();
5168 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5171 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5174 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5175 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5182 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5185 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5188 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5189 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5190 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5198 neon_store_reg(rd
, 0, tmp3
);
5199 neon_store_reg(rd
, 1, tmp
);
5202 /* Write back the result. */
5203 neon_store_reg64(cpu_V0
, rd
+ pass
);
5207 /* Two registers and a scalar. */
5209 case 0: /* Integer VMLA scalar */
5210 case 1: /* Float VMLA scalar */
5211 case 4: /* Integer VMLS scalar */
5212 case 5: /* Floating point VMLS scalar */
5213 case 8: /* Integer VMUL scalar */
5214 case 9: /* Floating point VMUL scalar */
5215 case 12: /* VQDMULH scalar */
5216 case 13: /* VQRDMULH scalar */
5217 tmp
= neon_get_scalar(size
, rm
);
5218 neon_store_scratch(0, tmp
);
5219 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5220 tmp
= neon_load_scratch(0);
5221 tmp2
= neon_load_reg(rn
, pass
);
5224 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5226 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5228 } else if (op
== 13) {
5230 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5232 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5234 } else if (op
& 1) {
5235 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
5238 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5239 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5240 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5244 tcg_temp_free_i32(tmp2
);
5247 tmp2
= neon_load_reg(rd
, pass
);
5250 gen_neon_add(size
, tmp
, tmp2
);
5253 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
5256 gen_neon_rsb(size
, tmp
, tmp2
);
5259 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
5264 tcg_temp_free_i32(tmp2
);
5266 neon_store_reg(rd
, pass
, tmp
);
            case 2: /* VMLAL scalar */
5270 case 3: /* VQDMLAL scalar */
5271 case 6: /* VMLSL scalar */
5272 case 7: /* VQDMLSL scalar */
5273 case 10: /* VMULL scalar */
5274 case 11: /* VQDMULL scalar */
5275 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5278 tmp2
= neon_get_scalar(size
, rm
);
5279 /* We need a copy of tmp2 because gen_neon_mull
5280 * deletes it during pass 0. */
5281 tmp4
= tcg_temp_new_i32();
5282 tcg_gen_mov_i32(tmp4
, tmp2
);
5283 tmp3
= neon_load_reg(rn
, 1);
5285 for (pass
= 0; pass
< 2; pass
++) {
5287 tmp
= neon_load_reg(rn
, 0);
5292 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5294 neon_load_reg64(cpu_V1
, rd
+ pass
);
5298 gen_neon_negl(cpu_V0
, size
);
5301 gen_neon_addl(size
);
5304 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5306 gen_neon_negl(cpu_V0
, size
);
5308 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5314 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5319 neon_store_reg64(cpu_V0
, rd
+ pass
);
5324 default: /* 14 and 15 are RESERVED */
5328 } else { /* size == 3 */
5331 imm
= (insn
>> 8) & 0xf;
5337 neon_load_reg64(cpu_V0
, rn
);
5339 neon_load_reg64(cpu_V1
, rn
+ 1);
5341 } else if (imm
== 8) {
5342 neon_load_reg64(cpu_V0
, rn
+ 1);
5344 neon_load_reg64(cpu_V1
, rm
);
5347 tmp64
= tcg_temp_new_i64();
5349 neon_load_reg64(cpu_V0
, rn
);
5350 neon_load_reg64(tmp64
, rn
+ 1);
5352 neon_load_reg64(cpu_V0
, rn
+ 1);
5353 neon_load_reg64(tmp64
, rm
);
5355 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5356 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5357 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5359 neon_load_reg64(cpu_V1
, rm
);
5361 neon_load_reg64(cpu_V1
, rm
+ 1);
5364 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5365 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5366 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5367 tcg_temp_free_i64(tmp64
);
5370 neon_load_reg64(cpu_V0
, rn
);
5371 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5372 neon_load_reg64(cpu_V1
, rm
);
5373 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5374 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5376 neon_store_reg64(cpu_V0
, rd
);
5378 neon_store_reg64(cpu_V1
, rd
+ 1);
5380 } else if ((insn
& (1 << 11)) == 0) {
5381 /* Two register misc. */
5382 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5383 size
= (insn
>> 18) & 3;
5385 case 0: /* VREV64 */
5388 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5389 tmp
= neon_load_reg(rm
, pass
* 2);
5390 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5392 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5393 case 1: gen_swap_half(tmp
); break;
5394 case 2: /* no-op */ break;
5397 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5399 neon_store_reg(rd
, pass
* 2, tmp2
);
5402 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5403 case 1: gen_swap_half(tmp2
); break;
5406 neon_store_reg(rd
, pass
* 2, tmp2
);
5410 case 4: case 5: /* VPADDL */
5411 case 12: case 13: /* VPADAL */
5414 for (pass
= 0; pass
< q
+ 1; pass
++) {
5415 tmp
= neon_load_reg(rm
, pass
* 2);
5416 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5417 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5418 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5420 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5421 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5422 case 2: tcg_gen_add_i64(CPU_V001
); break;
5427 neon_load_reg64(cpu_V1
, rd
+ pass
);
5428 gen_neon_addl(size
);
5430 neon_store_reg64(cpu_V0
, rd
+ pass
);
5435 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5436 tmp
= neon_load_reg(rm
, n
);
5437 tmp2
= neon_load_reg(rd
, n
+ 1);
5438 neon_store_reg(rm
, n
, tmp2
);
5439 neon_store_reg(rd
, n
+ 1, tmp
);
5446 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5451 if (gen_neon_zip(rd
, rm
, size
, q
)) {
            case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
                for (pass = 0; pass < 2; pass++) {
                    neon_load_reg64(cpu_V0, rm + pass);
                    tmp = tcg_temp_new_i32();
                    gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
                        neon_store_reg(rd, 0, tmp2);
                        neon_store_reg(rd, 1, tmp);
            case 38: /* VSHLL */
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    gen_neon_widen(cpu_V0, tmp, size, 1);
                    tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                    neon_store_reg64(cpu_V0, rd + pass);
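            /* Cases 44 and 46 below convert between single precision and half
             * precision.  From the code, each 32-bit element holds two packed
             * half-precision values: the F32->F16 path converts four singles
             * through the cpu_F0s scratch register and packs them pairwise
             * with shli/or, while the F16->F32 path unpacks with ext16u/shri
             * before converting each half.
             */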
            case 44: /* VCVT.F16.F32 */
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                    return 1;
                tmp = tcg_temp_new_i32();
                tmp2 = tcg_temp_new_i32();
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                neon_store_reg(rd, 0, tmp2);
                tmp2 = tcg_temp_new_i32();
                gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                neon_store_reg(rd, 1, tmp2);
                tcg_temp_free_i32(tmp);
            case 46: /* VCVT.F32.F16 */
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                    return 1;
                tmp3 = tcg_temp_new_i32();
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                tcg_gen_ext16u_i32(tmp3, tmp);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_shri_i32(tmp3, tmp, 16);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                tcg_temp_free_i32(tmp);
                tcg_gen_ext16u_i32(tmp3, tmp2);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                tcg_gen_shri_i32(tmp3, tmp2, 16);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp3);
5530 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5531 if (op
== 30 || op
== 31 || op
>= 58) {
5532 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5533 neon_reg_offset(rm
, pass
));
5536 tmp
= neon_load_reg(rm
, pass
);
5539 case 1: /* VREV32 */
5541 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5542 case 1: gen_swap_half(tmp
); break;
5546 case 2: /* VREV16 */
5553 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5554 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5555 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5561 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5562 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5563 case 2: gen_helper_clz(tmp
, tmp
); break;
5570 gen_helper_neon_cnt_u8(tmp
, tmp
);
5575 tcg_gen_not_i32(tmp
, tmp
);
5577 case 14: /* VQABS */
5579 case 0: gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
); break;
5580 case 1: gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
); break;
5581 case 2: gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
); break;
5585 case 15: /* VQNEG */
5587 case 0: gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
); break;
5588 case 1: gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
); break;
5589 case 2: gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
); break;
5593 case 16: case 19: /* VCGT #0, VCLE #0 */
5594 tmp2
= tcg_const_i32(0);
5596 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5597 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5598 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5601 tcg_temp_free(tmp2
);
5603 tcg_gen_not_i32(tmp
, tmp
);
5605 case 17: case 20: /* VCGE #0, VCLT #0 */
5606 tmp2
= tcg_const_i32(0);
5608 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5609 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5610 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5613 tcg_temp_free(tmp2
);
5615 tcg_gen_not_i32(tmp
, tmp
);
5617 case 18: /* VCEQ #0 */
5618 tmp2
= tcg_const_i32(0);
5620 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5621 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5622 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5625 tcg_temp_free(tmp2
);
5629 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5630 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5631 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5638 tmp2
= tcg_const_i32(0);
5639 gen_neon_rsb(size
, tmp
, tmp2
);
5640 tcg_temp_free(tmp2
);
5642 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5643 tmp2
= tcg_const_i32(0);
5644 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
5645 tcg_temp_free(tmp2
);
5647 tcg_gen_not_i32(tmp
, tmp
);
5649 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5650 tmp2
= tcg_const_i32(0);
5651 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
5652 tcg_temp_free(tmp2
);
5654 tcg_gen_not_i32(tmp
, tmp
);
5656 case 26: /* Float VCEQ #0 */
5657 tmp2
= tcg_const_i32(0);
5658 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
5659 tcg_temp_free(tmp2
);
5661 case 30: /* Float VABS */
5664 case 31: /* Float VNEG */
5668 tmp2
= neon_load_reg(rd
, pass
);
5669 neon_store_reg(rm
, pass
, tmp2
);
5672 tmp2
= neon_load_reg(rd
, pass
);
5674 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
5675 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
5679 neon_store_reg(rm
, pass
, tmp2
);
5681 case 56: /* Integer VRECPE */
5682 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
5684 case 57: /* Integer VRSQRTE */
5685 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
5687 case 58: /* Float VRECPE */
5688 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5690 case 59: /* Float VRSQRTE */
5691 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5693 case 60: /* VCVT.F32.S32 */
5696 case 61: /* VCVT.F32.U32 */
5699 case 62: /* VCVT.S32.F32 */
5702 case 63: /* VCVT.U32.F32 */
5706 /* Reserved: 21, 29, 39-56 */
5709 if (op
== 30 || op
== 31 || op
>= 58) {
5710 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
5711 neon_reg_offset(rd
, pass
));
5713 neon_store_reg(rd
, pass
, tmp
);
5718 } else if ((insn
& (1 << 10)) == 0) {
5720 n
= ((insn
>> 5) & 0x18) + 8;
5721 if (insn
& (1 << 6)) {
5722 tmp
= neon_load_reg(rd
, 0);
5724 tmp
= tcg_temp_new_i32();
5725 tcg_gen_movi_i32(tmp
, 0);
5727 tmp2
= neon_load_reg(rm
, 0);
5728 tmp4
= tcg_const_i32(rn
);
5729 tmp5
= tcg_const_i32(n
);
5730 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
5731 tcg_temp_free_i32(tmp
);
5732 if (insn
& (1 << 6)) {
5733 tmp
= neon_load_reg(rd
, 1);
5735 tmp
= tcg_temp_new_i32();
5736 tcg_gen_movi_i32(tmp
, 0);
5738 tmp3
= neon_load_reg(rm
, 1);
5739 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
5740 tcg_temp_free_i32(tmp5
);
5741 tcg_temp_free_i32(tmp4
);
5742 neon_store_reg(rd
, 0, tmp2
);
5743 neon_store_reg(rd
, 1, tmp3
);
5744 tcg_temp_free_i32(tmp
);
5745 } else if ((insn
& 0x380) == 0) {
5747 if (insn
& (1 << 19)) {
5748 tmp
= neon_load_reg(rm
, 1);
5750 tmp
= neon_load_reg(rm
, 0);
5752 if (insn
& (1 << 16)) {
5753 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
5754 } else if (insn
& (1 << 17)) {
5755 if ((insn
>> 18) & 1)
5756 gen_neon_dup_high16(tmp
);
5758 gen_neon_dup_low16(tmp
);
5760 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5761 tmp2
= tcg_temp_new_i32();
5762 tcg_gen_mov_i32(tmp2
, tmp
);
5763 neon_store_reg(rd
, pass
, tmp2
);
5765 tcg_temp_free_i32(tmp
);
static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
        switch (crm) {
        case 0:
            /* DBGDIDR: just RAZ. In particular this means the
             * "debug architecture version" bits will read as
             * a reserved value, which should cause Linux to
             * not try to use the debug hardware.
             */
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
            return 0;
        case 1:
        case 2:
            /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
             * don't implement memory mapped debug components
             */
            if (ENABLE_ARCH_7) {
                tmp = tcg_const_i32(0);
                store_reg(s, rt, tmp);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
    }
}
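/* The helpers below move 64-bit values between a TCGv_i64 and a pair of
 * 32-bit guest registers.  They are used further down by the long multiply
 * and multiply-accumulate forms (e.g. UMULL/UMLAL/SMULL/SMLAL, UMAAL and
 * the dual multiply-accumulate-long cases).
 */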
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
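/* Two implementations of gen_store_exclusive follow.  In the user-only
 * build the store side is deferred: the address and the destination/source
 * register numbers are stashed in cpu_exclusive_test/cpu_exclusive_info and
 * an EXCP_STREX exception is raised so the operation can be completed
 * outside generated code.  In the system build the checks against
 * cpu_exclusive_addr/val/high and the store itself are emitted inline,
 * branching to fail/done labels to set the status result in Rd.
 */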
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
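/* Decode and translate a single ARM-state instruction fetched from s->pc. */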
static void disas_arm_insn(CPUState * env, DisasContext *s)
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;

    insn = ldl_code(s->pc);

    /* M variants do not implement ARM mode.  */

    /* Unconditional instructions.  */
    if (((insn >> 25) & 7) == 1) {
        /* NEON Data processing.  */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_data_insn(env, s, insn))
    if ((insn & 0x0f100000) == 0x04000000) {
        /* NEON load/store.  */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_ls_insn(env, s, insn))
    if (((insn & 0x0f30f000) == 0x0510f000) ||
        ((insn & 0x0f30f010) == 0x0710f000)) {
        if ((insn & (1 << 22)) == 0) {
            if (!arm_feature(env, ARM_FEATURE_V7MP)) {
        /* Otherwise PLD; v5TE+ */
    if (((insn & 0x0f70f000) == 0x0450f000) ||
        ((insn & 0x0f70f010) == 0x0650f000)) {
        return; /* PLI; V7 */
    if (((insn & 0x0f700000) == 0x04100000) ||
        ((insn & 0x0f700010) == 0x06100000)) {
        if (!arm_feature(env, ARM_FEATURE_V7MP)) {
        return; /* v7MP: Unallocated memory hint: must NOP */
6147 if ((insn
& 0x0ffffdff) == 0x01010000) {
6150 if (insn
& (1 << 9)) {
6151 /* BE8 mode not implemented. */
6155 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6156 switch ((insn
>> 4) & 0xf) {
6165 /* We don't emulate caches so these are a no-op. */
6170 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6176 op1
= (insn
& 0x1f);
6177 addr
= tcg_temp_new_i32();
6178 tmp
= tcg_const_i32(op1
);
6179 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6180 tcg_temp_free_i32(tmp
);
6181 i
= (insn
>> 23) & 3;
6183 case 0: offset
= -4; break; /* DA */
6184 case 1: offset
= 0; break; /* IA */
6185 case 2: offset
= -8; break; /* DB */
6186 case 3: offset
= 4; break; /* IB */
6190 tcg_gen_addi_i32(addr
, addr
, offset
);
6191 tmp
= load_reg(s
, 14);
6192 gen_st32(tmp
, addr
, 0);
6193 tmp
= load_cpu_field(spsr
);
6194 tcg_gen_addi_i32(addr
, addr
, 4);
6195 gen_st32(tmp
, addr
, 0);
6196 if (insn
& (1 << 21)) {
6197 /* Base writeback. */
6199 case 0: offset
= -8; break;
6200 case 1: offset
= 4; break;
6201 case 2: offset
= -4; break;
6202 case 3: offset
= 0; break;
6206 tcg_gen_addi_i32(addr
, addr
, offset
);
6207 tmp
= tcg_const_i32(op1
);
6208 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6209 tcg_temp_free_i32(tmp
);
6210 tcg_temp_free_i32(addr
);
6212 tcg_temp_free_i32(addr
);
6215 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6221 rn
= (insn
>> 16) & 0xf;
6222 addr
= load_reg(s
, rn
);
6223 i
= (insn
>> 23) & 3;
6225 case 0: offset
= -4; break; /* DA */
6226 case 1: offset
= 0; break; /* IA */
6227 case 2: offset
= -8; break; /* DB */
6228 case 3: offset
= 4; break; /* IB */
6232 tcg_gen_addi_i32(addr
, addr
, offset
);
6233 /* Load PC into tmp and CPSR into tmp2. */
6234 tmp
= gen_ld32(addr
, 0);
6235 tcg_gen_addi_i32(addr
, addr
, 4);
6236 tmp2
= gen_ld32(addr
, 0);
6237 if (insn
& (1 << 21)) {
6238 /* Base writeback. */
6240 case 0: offset
= -8; break;
6241 case 1: offset
= 4; break;
6242 case 2: offset
= -4; break;
6243 case 3: offset
= 0; break;
6247 tcg_gen_addi_i32(addr
, addr
, offset
);
6248 store_reg(s
, rn
, addr
);
6250 tcg_temp_free_i32(addr
);
6252 gen_rfe(s
, tmp
, tmp2
);
6254 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6255 /* branch link and change to thumb (blx <offset>) */
6258 val
= (uint32_t)s
->pc
;
6259 tmp
= tcg_temp_new_i32();
6260 tcg_gen_movi_i32(tmp
, val
);
6261 store_reg(s
, 14, tmp
);
6262 /* Sign-extend the 24-bit offset */
6263 offset
= (((int32_t)insn
) << 8) >> 8;
6264 /* offset * 4 + bit24 * 2 + (thumb bit) */
6265 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6266 /* pipeline offset */
6270 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6271 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6272 /* iWMMXt register transfer. */
6273 if (env
->cp15
.c15_cpar
& (1 << 1))
6274 if (!disas_iwmmxt_insn(env
, s
, insn
))
6277 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6278 /* Coprocessor double register transfer. */
6279 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6280 /* Additional coprocessor register transfer. */
6281 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6284 /* cps (privileged) */
6288 if (insn
& (1 << 19)) {
6289 if (insn
& (1 << 8))
6291 if (insn
& (1 << 7))
6293 if (insn
& (1 << 6))
6295 if (insn
& (1 << 18))
6298 if (insn
& (1 << 17)) {
6300 val
|= (insn
& 0x1f);
6303 gen_set_psr_im(s
, mask
, 0, val
);
6310 /* if not always execute, we generate a conditional jump to
6312 s
->condlabel
= gen_new_label();
6313 gen_test_cc(cond
^ 1, s
->condlabel
);
6316 if ((insn
& 0x0f900000) == 0x03000000) {
6317 if ((insn
& (1 << 21)) == 0) {
6319 rd
= (insn
>> 12) & 0xf;
6320 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6321 if ((insn
& (1 << 22)) == 0) {
6323 tmp
= tcg_temp_new_i32();
6324 tcg_gen_movi_i32(tmp
, val
);
6327 tmp
= load_reg(s
, rd
);
6328 tcg_gen_ext16u_i32(tmp
, tmp
);
6329 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6331 store_reg(s
, rd
, tmp
);
6333 if (((insn
>> 12) & 0xf) != 0xf)
6335 if (((insn
>> 16) & 0xf) == 0) {
6336 gen_nop_hint(s
, insn
& 0xff);
6338 /* CPSR = immediate */
6340 shift
= ((insn
>> 8) & 0xf) * 2;
6342 val
= (val
>> shift
) | (val
<< (32 - shift
));
6343 i
= ((insn
& (1 << 22)) != 0);
6344 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6348 } else if ((insn
& 0x0f900000) == 0x01000000
6349 && (insn
& 0x00000090) != 0x00000090) {
6350 /* miscellaneous instructions */
6351 op1
= (insn
>> 21) & 3;
6352 sh
= (insn
>> 4) & 0xf;
6355 case 0x0: /* move program status register */
6358 tmp
= load_reg(s
, rm
);
6359 i
= ((op1
& 2) != 0);
6360 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6364 rd
= (insn
>> 12) & 0xf;
6368 tmp
= load_cpu_field(spsr
);
6370 tmp
= tcg_temp_new_i32();
6371 gen_helper_cpsr_read(tmp
);
6373 store_reg(s
, rd
, tmp
);
6378 /* branch/exchange thumb (bx). */
6379 tmp
= load_reg(s
, rm
);
6381 } else if (op1
== 3) {
6383 rd
= (insn
>> 12) & 0xf;
6384 tmp
= load_reg(s
, rm
);
6385 gen_helper_clz(tmp
, tmp
);
6386 store_reg(s
, rd
, tmp
);
6394 /* Trivial implementation equivalent to bx. */
6395 tmp
= load_reg(s
, rm
);
6405 /* branch link/exchange thumb (blx) */
6406 tmp
= load_reg(s
, rm
);
6407 tmp2
= tcg_temp_new_i32();
6408 tcg_gen_movi_i32(tmp2
, s
->pc
);
6409 store_reg(s
, 14, tmp2
);
6412 case 0x5: /* saturating add/subtract */
6413 rd
= (insn
>> 12) & 0xf;
6414 rn
= (insn
>> 16) & 0xf;
6415 tmp
= load_reg(s
, rm
);
6416 tmp2
= load_reg(s
, rn
);
6418 gen_helper_double_saturate(tmp2
, tmp2
);
6420 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6422 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6423 tcg_temp_free_i32(tmp2
);
6424 store_reg(s
, rd
, tmp
);
6427 /* SMC instruction (op1 == 3)
6428 and undefined instructions (op1 == 0 || op1 == 2)
6434 gen_exception_insn(s
, 4, EXCP_BKPT
);
6436 case 0x8: /* signed multiply */
6440 rs
= (insn
>> 8) & 0xf;
6441 rn
= (insn
>> 12) & 0xf;
6442 rd
= (insn
>> 16) & 0xf;
6444 /* (32 * 16) >> 16 */
6445 tmp
= load_reg(s
, rm
);
6446 tmp2
= load_reg(s
, rs
);
6448 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6451 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6452 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6453 tmp
= tcg_temp_new_i32();
6454 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6455 tcg_temp_free_i64(tmp64
);
6456 if ((sh
& 2) == 0) {
6457 tmp2
= load_reg(s
, rn
);
6458 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6459 tcg_temp_free_i32(tmp2
);
6461 store_reg(s
, rd
, tmp
);
6464 tmp
= load_reg(s
, rm
);
6465 tmp2
= load_reg(s
, rs
);
6466 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6467 tcg_temp_free_i32(tmp2
);
6469 tmp64
= tcg_temp_new_i64();
6470 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6471 tcg_temp_free_i32(tmp
);
6472 gen_addq(s
, tmp64
, rn
, rd
);
6473 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6474 tcg_temp_free_i64(tmp64
);
6477 tmp2
= load_reg(s
, rn
);
6478 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6479 tcg_temp_free_i32(tmp2
);
6481 store_reg(s
, rd
, tmp
);
6488 } else if (((insn
& 0x0e000000) == 0 &&
6489 (insn
& 0x00000090) != 0x90) ||
6490 ((insn
& 0x0e000000) == (1 << 25))) {
6491 int set_cc
, logic_cc
, shiftop
;
6493 op1
= (insn
>> 21) & 0xf;
6494 set_cc
= (insn
>> 20) & 1;
6495 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6497 /* data processing instruction */
6498 if (insn
& (1 << 25)) {
6499 /* immediate operand */
6501 shift
= ((insn
>> 8) & 0xf) * 2;
6503 val
= (val
>> shift
) | (val
<< (32 - shift
));
6505 tmp2
= tcg_temp_new_i32();
6506 tcg_gen_movi_i32(tmp2
, val
);
6507 if (logic_cc
&& shift
) {
6508 gen_set_CF_bit31(tmp2
);
6513 tmp2
= load_reg(s
, rm
);
6514 shiftop
= (insn
>> 5) & 3;
6515 if (!(insn
& (1 << 4))) {
6516 shift
= (insn
>> 7) & 0x1f;
6517 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6519 rs
= (insn
>> 8) & 0xf;
6520 tmp
= load_reg(s
, rs
);
6521 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6524 if (op1
!= 0x0f && op1
!= 0x0d) {
6525 rn
= (insn
>> 16) & 0xf;
6526 tmp
= load_reg(s
, rn
);
6530 rd
= (insn
>> 12) & 0xf;
6533 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6537 store_reg_bx(env
, s
, rd
, tmp
);
6540 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6544 store_reg_bx(env
, s
, rd
, tmp
);
6547 if (set_cc
&& rd
== 15) {
6548 /* SUBS r15, ... is used for exception return. */
6552 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6553 gen_exception_return(s
, tmp
);
6556 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6558 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6560 store_reg_bx(env
, s
, rd
, tmp
);
6565 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6567 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6569 store_reg_bx(env
, s
, rd
, tmp
);
6573 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6575 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6577 store_reg_bx(env
, s
, rd
, tmp
);
6581 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6583 gen_add_carry(tmp
, tmp
, tmp2
);
6585 store_reg_bx(env
, s
, rd
, tmp
);
6589 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6591 gen_sub_carry(tmp
, tmp
, tmp2
);
6593 store_reg_bx(env
, s
, rd
, tmp
);
6597 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6599 gen_sub_carry(tmp
, tmp2
, tmp
);
6601 store_reg_bx(env
, s
, rd
, tmp
);
6605 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6608 tcg_temp_free_i32(tmp
);
6612 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6615 tcg_temp_free_i32(tmp
);
6619 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6621 tcg_temp_free_i32(tmp
);
6625 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6627 tcg_temp_free_i32(tmp
);
6630 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6634 store_reg_bx(env
, s
, rd
, tmp
);
6637 if (logic_cc
&& rd
== 15) {
6638 /* MOVS r15, ... is used for exception return. */
6642 gen_exception_return(s
, tmp2
);
6647 store_reg_bx(env
, s
, rd
, tmp2
);
6651 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6655 store_reg_bx(env
, s
, rd
, tmp
);
6659 tcg_gen_not_i32(tmp2
, tmp2
);
6663 store_reg_bx(env
, s
, rd
, tmp2
);
6666 if (op1
!= 0x0f && op1
!= 0x0d) {
6667 tcg_temp_free_i32(tmp2
);
6670 /* other instructions */
6671 op1
= (insn
>> 24) & 0xf;
6675 /* multiplies, extra load/stores */
6676 sh
= (insn
>> 5) & 3;
6679 rd
= (insn
>> 16) & 0xf;
6680 rn
= (insn
>> 12) & 0xf;
6681 rs
= (insn
>> 8) & 0xf;
6683 op1
= (insn
>> 20) & 0xf;
6685 case 0: case 1: case 2: case 3: case 6:
6687 tmp
= load_reg(s
, rs
);
6688 tmp2
= load_reg(s
, rm
);
6689 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6690 tcg_temp_free_i32(tmp2
);
6691 if (insn
& (1 << 22)) {
6692 /* Subtract (mls) */
6694 tmp2
= load_reg(s
, rn
);
6695 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6696 tcg_temp_free_i32(tmp2
);
6697 } else if (insn
& (1 << 21)) {
6699 tmp2
= load_reg(s
, rn
);
6700 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6701 tcg_temp_free_i32(tmp2
);
6703 if (insn
& (1 << 20))
6705 store_reg(s
, rd
, tmp
);
6708 /* 64 bit mul double accumulate (UMAAL) */
6710 tmp
= load_reg(s
, rs
);
6711 tmp2
= load_reg(s
, rm
);
6712 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6713 gen_addq_lo(s
, tmp64
, rn
);
6714 gen_addq_lo(s
, tmp64
, rd
);
6715 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6716 tcg_temp_free_i64(tmp64
);
6718 case 8: case 9: case 10: case 11:
6719 case 12: case 13: case 14: case 15:
6720 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6721 tmp
= load_reg(s
, rs
);
6722 tmp2
= load_reg(s
, rm
);
6723 if (insn
& (1 << 22)) {
6724 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6726 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6728 if (insn
& (1 << 21)) { /* mult accumulate */
6729 gen_addq(s
, tmp64
, rn
, rd
);
6731 if (insn
& (1 << 20)) {
6732 gen_logicq_cc(tmp64
);
6734 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6735 tcg_temp_free_i64(tmp64
);
6741 rn
= (insn
>> 16) & 0xf;
6742 rd
= (insn
>> 12) & 0xf;
6743 if (insn
& (1 << 23)) {
6744 /* load/store exclusive */
6745 op1
= (insn
>> 21) & 0x3;
6750 addr
= tcg_temp_local_new_i32();
6751 load_reg_var(s
, addr
, rn
);
6752 if (insn
& (1 << 20)) {
6755 gen_load_exclusive(s
, rd
, 15, addr
, 2);
6757 case 1: /* ldrexd */
6758 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
6760 case 2: /* ldrexb */
6761 gen_load_exclusive(s
, rd
, 15, addr
, 0);
6763 case 3: /* ldrexh */
6764 gen_load_exclusive(s
, rd
, 15, addr
, 1);
6773 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
6775 case 1: /* strexd */
6776 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
6778 case 2: /* strexb */
6779 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
6781 case 3: /* strexh */
6782 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
6788 tcg_temp_free(addr
);
6790 /* SWP instruction */
6793 /* ??? This is not really atomic. However we know
6794 we never have multiple CPUs running in parallel,
6795 so it is good enough. */
6796 addr
= load_reg(s
, rn
);
6797 tmp
= load_reg(s
, rm
);
6798 if (insn
& (1 << 22)) {
6799 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6800 gen_st8(tmp
, addr
, IS_USER(s
));
6802 tmp2
= gen_ld32(addr
, IS_USER(s
));
6803 gen_st32(tmp
, addr
, IS_USER(s
));
6805 tcg_temp_free_i32(addr
);
6806 store_reg(s
, rd
, tmp2
);
6812 /* Misc load/store */
6813 rn
= (insn
>> 16) & 0xf;
6814 rd
= (insn
>> 12) & 0xf;
6815 addr
= load_reg(s
, rn
);
6816 if (insn
& (1 << 24))
6817 gen_add_datah_offset(s
, insn
, 0, addr
);
6819 if (insn
& (1 << 20)) {
6823 tmp
= gen_ld16u(addr
, IS_USER(s
));
6826 tmp
= gen_ld8s(addr
, IS_USER(s
));
6830 tmp
= gen_ld16s(addr
, IS_USER(s
));
6834 } else if (sh
& 2) {
6838 tmp
= load_reg(s
, rd
);
6839 gen_st32(tmp
, addr
, IS_USER(s
));
6840 tcg_gen_addi_i32(addr
, addr
, 4);
6841 tmp
= load_reg(s
, rd
+ 1);
6842 gen_st32(tmp
, addr
, IS_USER(s
));
6846 tmp
= gen_ld32(addr
, IS_USER(s
));
6847 store_reg(s
, rd
, tmp
);
6848 tcg_gen_addi_i32(addr
, addr
, 4);
6849 tmp
= gen_ld32(addr
, IS_USER(s
));
6853 address_offset
= -4;
6856 tmp
= load_reg(s
, rd
);
6857 gen_st16(tmp
, addr
, IS_USER(s
));
6860 /* Perform base writeback before the loaded value to
6861 ensure correct behavior with overlapping index registers.
6862 ldrd with base writeback is is undefined if the
6863 destination and index registers overlap. */
6864 if (!(insn
& (1 << 24))) {
6865 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6866 store_reg(s
, rn
, addr
);
6867 } else if (insn
& (1 << 21)) {
6869 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6870 store_reg(s
, rn
, addr
);
6872 tcg_temp_free_i32(addr
);
6875 /* Complete the load. */
6876 store_reg(s
, rd
, tmp
);
6885 if (insn
& (1 << 4)) {
6887 /* Armv6 Media instructions. */
6889 rn
= (insn
>> 16) & 0xf;
6890 rd
= (insn
>> 12) & 0xf;
6891 rs
= (insn
>> 8) & 0xf;
6892 switch ((insn
>> 23) & 3) {
6893 case 0: /* Parallel add/subtract. */
6894 op1
= (insn
>> 20) & 7;
6895 tmp
= load_reg(s
, rn
);
6896 tmp2
= load_reg(s
, rm
);
6897 sh
= (insn
>> 5) & 7;
6898 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6900 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6901 tcg_temp_free_i32(tmp2
);
6902 store_reg(s
, rd
, tmp
);
6905 if ((insn
& 0x00700020) == 0) {
6906 /* Halfword pack. */
6907 tmp
= load_reg(s
, rn
);
6908 tmp2
= load_reg(s
, rm
);
6909 shift
= (insn
>> 7) & 0x1f;
6910 if (insn
& (1 << 6)) {
6914 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6915 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6916 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6920 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6921 tcg_gen_ext16u_i32(tmp
, tmp
);
6922 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6924 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6925 tcg_temp_free_i32(tmp2
);
6926 store_reg(s
, rd
, tmp
);
6927 } else if ((insn
& 0x00200020) == 0x00200000) {
6929 tmp
= load_reg(s
, rm
);
6930 shift
= (insn
>> 7) & 0x1f;
6931 if (insn
& (1 << 6)) {
6934 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6936 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6938 sh
= (insn
>> 16) & 0x1f;
6939 tmp2
= tcg_const_i32(sh
);
6940 if (insn
& (1 << 22))
6941 gen_helper_usat(tmp
, tmp
, tmp2
);
6943 gen_helper_ssat(tmp
, tmp
, tmp2
);
6944 tcg_temp_free_i32(tmp2
);
6945 store_reg(s
, rd
, tmp
);
6946 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6948 tmp
= load_reg(s
, rm
);
6949 sh
= (insn
>> 16) & 0x1f;
6950 tmp2
= tcg_const_i32(sh
);
6951 if (insn
& (1 << 22))
6952 gen_helper_usat16(tmp
, tmp
, tmp2
);
6954 gen_helper_ssat16(tmp
, tmp
, tmp2
);
6955 tcg_temp_free_i32(tmp2
);
6956 store_reg(s
, rd
, tmp
);
6957 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6959 tmp
= load_reg(s
, rn
);
6960 tmp2
= load_reg(s
, rm
);
6961 tmp3
= tcg_temp_new_i32();
6962 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6963 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6964 tcg_temp_free_i32(tmp3
);
6965 tcg_temp_free_i32(tmp2
);
6966 store_reg(s
, rd
, tmp
);
6967 } else if ((insn
& 0x000003e0) == 0x00000060) {
6968 tmp
= load_reg(s
, rm
);
6969 shift
= (insn
>> 10) & 3;
6970 /* ??? In many cases it's not neccessary to do a
6971 rotate, a shift is sufficient. */
6973 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
6974 op1
= (insn
>> 20) & 7;
6976 case 0: gen_sxtb16(tmp
); break;
6977 case 2: gen_sxtb(tmp
); break;
6978 case 3: gen_sxth(tmp
); break;
6979 case 4: gen_uxtb16(tmp
); break;
6980 case 6: gen_uxtb(tmp
); break;
6981 case 7: gen_uxth(tmp
); break;
6982 default: goto illegal_op
;
6985 tmp2
= load_reg(s
, rn
);
6986 if ((op1
& 3) == 0) {
6987 gen_add16(tmp
, tmp2
);
6989 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6990 tcg_temp_free_i32(tmp2
);
6993 store_reg(s
, rd
, tmp
);
6994 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
6996 tmp
= load_reg(s
, rm
);
6997 if (insn
& (1 << 22)) {
6998 if (insn
& (1 << 7)) {
7002 gen_helper_rbit(tmp
, tmp
);
7005 if (insn
& (1 << 7))
7008 tcg_gen_bswap32_i32(tmp
, tmp
);
7010 store_reg(s
, rd
, tmp
);
7015 case 2: /* Multiplies (Type 3). */
7016 tmp
= load_reg(s
, rm
);
7017 tmp2
= load_reg(s
, rs
);
7018 if (insn
& (1 << 20)) {
7019 /* Signed multiply most significant [accumulate].
7020 (SMMUL, SMMLA, SMMLS) */
7021 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7024 tmp
= load_reg(s
, rd
);
7025 if (insn
& (1 << 6)) {
7026 tmp64
= gen_subq_msw(tmp64
, tmp
);
7028 tmp64
= gen_addq_msw(tmp64
, tmp
);
7031 if (insn
& (1 << 5)) {
7032 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7034 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7035 tmp
= tcg_temp_new_i32();
7036 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7037 tcg_temp_free_i64(tmp64
);
7038 store_reg(s
, rn
, tmp
);
7040 if (insn
& (1 << 5))
7041 gen_swap_half(tmp2
);
7042 gen_smul_dual(tmp
, tmp2
);
7043 if (insn
& (1 << 6)) {
7044 /* This subtraction cannot overflow. */
7045 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7047 /* This addition cannot overflow 32 bits;
7048 * however it may overflow considered as a signed
7049 * operation, in which case we must set the Q flag.
7051 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7053 tcg_temp_free_i32(tmp2
);
7054 if (insn
& (1 << 22)) {
7055 /* smlald, smlsld */
7056 tmp64
= tcg_temp_new_i64();
7057 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7058 tcg_temp_free_i32(tmp
);
7059 gen_addq(s
, tmp64
, rd
, rn
);
7060 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7061 tcg_temp_free_i64(tmp64
);
7063 /* smuad, smusd, smlad, smlsd */
7066 tmp2
= load_reg(s
, rd
);
7067 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7068 tcg_temp_free_i32(tmp2
);
7070 store_reg(s
, rn
, tmp
);
7075 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7077 case 0: /* Unsigned sum of absolute differences. */
7079 tmp
= load_reg(s
, rm
);
7080 tmp2
= load_reg(s
, rs
);
7081 gen_helper_usad8(tmp
, tmp
, tmp2
);
7082 tcg_temp_free_i32(tmp2
);
7084 tmp2
= load_reg(s
, rd
);
7085 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7086 tcg_temp_free_i32(tmp2
);
7088 store_reg(s
, rn
, tmp
);
7090 case 0x20: case 0x24: case 0x28: case 0x2c:
7091 /* Bitfield insert/clear. */
7093 shift
= (insn
>> 7) & 0x1f;
7094 i
= (insn
>> 16) & 0x1f;
7097 tmp
= tcg_temp_new_i32();
7098 tcg_gen_movi_i32(tmp
, 0);
7100 tmp
= load_reg(s
, rm
);
7103 tmp2
= load_reg(s
, rd
);
7104 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7105 tcg_temp_free_i32(tmp2
);
7107 store_reg(s
, rd
, tmp
);
7109 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7110 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7112 tmp
= load_reg(s
, rm
);
7113 shift
= (insn
>> 7) & 0x1f;
7114 i
= ((insn
>> 16) & 0x1f) + 1;
7119 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7121 gen_sbfx(tmp
, shift
, i
);
7124 store_reg(s
, rd
, tmp
);
7134 /* Check for undefined extension instructions
7135 * per the ARM Bible IE:
7136 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7138 sh
= (0xf << 20) | (0xf << 4);
7139 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7143 /* load/store byte/word */
7144 rn
= (insn
>> 16) & 0xf;
7145 rd
= (insn
>> 12) & 0xf;
7146 tmp2
= load_reg(s
, rn
);
7147 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7148 if (insn
& (1 << 24))
7149 gen_add_data_offset(s
, insn
, tmp2
);
7150 if (insn
& (1 << 20)) {
7152 if (insn
& (1 << 22)) {
7153 tmp
= gen_ld8u(tmp2
, i
);
7155 tmp
= gen_ld32(tmp2
, i
);
7159 tmp
= load_reg(s
, rd
);
7160 if (insn
& (1 << 22))
7161 gen_st8(tmp
, tmp2
, i
);
7163 gen_st32(tmp
, tmp2
, i
);
7165 if (!(insn
& (1 << 24))) {
7166 gen_add_data_offset(s
, insn
, tmp2
);
7167 store_reg(s
, rn
, tmp2
);
7168 } else if (insn
& (1 << 21)) {
7169 store_reg(s
, rn
, tmp2
);
7171 tcg_temp_free_i32(tmp2
);
7173 if (insn
& (1 << 20)) {
7174 /* Complete the load. */
7178 store_reg(s
, rd
, tmp
);
7184 int j
, n
, user
, loaded_base
;
7186 /* load/store multiple words */
7187 /* XXX: store correct base if write back */
7189 if (insn
& (1 << 22)) {
7191 goto illegal_op
; /* only usable in supervisor mode */
7193 if ((insn
& (1 << 15)) == 0)
7196 rn
= (insn
>> 16) & 0xf;
7197 addr
= load_reg(s
, rn
);
7199 /* compute total size */
7201 TCGV_UNUSED(loaded_var
);
7204 if (insn
& (1 << i
))
7207 /* XXX: test invalid n == 0 case ? */
7208 if (insn
& (1 << 23)) {
7209 if (insn
& (1 << 24)) {
7211 tcg_gen_addi_i32(addr
, addr
, 4);
7213 /* post increment */
7216 if (insn
& (1 << 24)) {
7218 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7220 /* post decrement */
7222 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7227 if (insn
& (1 << i
)) {
7228 if (insn
& (1 << 20)) {
7230 tmp
= gen_ld32(addr
, IS_USER(s
));
7234 tmp2
= tcg_const_i32(i
);
7235 gen_helper_set_user_reg(tmp2
, tmp
);
7236 tcg_temp_free_i32(tmp2
);
7237 tcg_temp_free_i32(tmp
);
7238 } else if (i
== rn
) {
7242 store_reg(s
, i
, tmp
);
7247 /* special case: r15 = PC + 8 */
7248 val
= (long)s
->pc
+ 4;
7249 tmp
= tcg_temp_new_i32();
7250 tcg_gen_movi_i32(tmp
, val
);
7252 tmp
= tcg_temp_new_i32();
7253 tmp2
= tcg_const_i32(i
);
7254 gen_helper_get_user_reg(tmp
, tmp2
);
7255 tcg_temp_free_i32(tmp2
);
7257 tmp
= load_reg(s
, i
);
7259 gen_st32(tmp
, addr
, IS_USER(s
));
7262 /* no need to add after the last transfer */
7264 tcg_gen_addi_i32(addr
, addr
, 4);
7267 if (insn
& (1 << 21)) {
7269 if (insn
& (1 << 23)) {
7270 if (insn
& (1 << 24)) {
7273 /* post increment */
7274 tcg_gen_addi_i32(addr
, addr
, 4);
7277 if (insn
& (1 << 24)) {
7280 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7282 /* post decrement */
7283 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7286 store_reg(s
, rn
, addr
);
7288 tcg_temp_free_i32(addr
);
7291 store_reg(s
, rn
, loaded_var
);
7293 if ((insn
& (1 << 22)) && !user
) {
7294 /* Restore CPSR from SPSR. */
7295 tmp
= load_cpu_field(spsr
);
7296 gen_set_cpsr(tmp
, 0xffffffff);
7297 tcg_temp_free_i32(tmp
);
7298 s
->is_jmp
= DISAS_UPDATE
;
7307 /* branch (and link) */
7308 val
= (int32_t)s
->pc
;
7309 if (insn
& (1 << 24)) {
7310 tmp
= tcg_temp_new_i32();
7311 tcg_gen_movi_i32(tmp
, val
);
7312 store_reg(s
, 14, tmp
);
7314 offset
= (((int32_t)insn
<< 8) >> 8);
7315 val
+= (offset
<< 2) + 4;
7323 if (disas_coproc_insn(env
, s
, insn
))
7328 gen_set_pc_im(s
->pc
);
7329 s
->is_jmp
= DISAS_SWI
;
7333 gen_exception_insn(s
, 4, EXCP_UDEF
);
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_helper_add_cc(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_helper_adc_cc(t0, t0, t1);
        break;
    case 11: /* sbc */
        if (conds)
            gen_helper_sbc_cc(t0, t0, t1);
        else
            gen_sub_carry(t0, t0, t1);
        break;
    case 13: /* sub */
        if (conds)
            gen_helper_sub_cc(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_helper_sub_cc(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
        /* Fall through to 32-bit decode.  */
7477 insn
= lduw_code(s
->pc
);
7479 insn
|= (uint32_t)insn_hw1
<< 16;
7481 if ((insn
& 0xf800e800) != 0xf000e800) {
7485 rn
= (insn
>> 16) & 0xf;
7486 rs
= (insn
>> 12) & 0xf;
7487 rd
= (insn
>> 8) & 0xf;
7489 switch ((insn
>> 25) & 0xf) {
7490 case 0: case 1: case 2: case 3:
7491 /* 16-bit instructions. Should never happen. */
7494 if (insn
& (1 << 22)) {
7495 /* Other load/store, table branch. */
7496 if (insn
& 0x01200000) {
7497 /* Load/store doubleword. */
7499 addr
= tcg_temp_new_i32();
7500 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7502 addr
= load_reg(s
, rn
);
7504 offset
= (insn
& 0xff) * 4;
7505 if ((insn
& (1 << 23)) == 0)
7507 if (insn
& (1 << 24)) {
7508 tcg_gen_addi_i32(addr
, addr
, offset
);
7511 if (insn
& (1 << 20)) {
7513 tmp
= gen_ld32(addr
, IS_USER(s
));
7514 store_reg(s
, rs
, tmp
);
7515 tcg_gen_addi_i32(addr
, addr
, 4);
7516 tmp
= gen_ld32(addr
, IS_USER(s
));
7517 store_reg(s
, rd
, tmp
);
7520 tmp
= load_reg(s
, rs
);
7521 gen_st32(tmp
, addr
, IS_USER(s
));
7522 tcg_gen_addi_i32(addr
, addr
, 4);
7523 tmp
= load_reg(s
, rd
);
7524 gen_st32(tmp
, addr
, IS_USER(s
));
7526 if (insn
& (1 << 21)) {
7527 /* Base writeback. */
7530 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7531 store_reg(s
, rn
, addr
);
7533 tcg_temp_free_i32(addr
);
7535 } else if ((insn
& (1 << 23)) == 0) {
7536 /* Load/store exclusive word. */
7537 addr
= tcg_temp_local_new();
7538 load_reg_var(s
, addr
, rn
);
7539 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7540 if (insn
& (1 << 20)) {
7541 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7543 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7545 tcg_temp_free(addr
);
7546 } else if ((insn
& (1 << 6)) == 0) {
7549 addr
= tcg_temp_new_i32();
7550 tcg_gen_movi_i32(addr
, s
->pc
);
7552 addr
= load_reg(s
, rn
);
7554 tmp
= load_reg(s
, rm
);
7555 tcg_gen_add_i32(addr
, addr
, tmp
);
7556 if (insn
& (1 << 4)) {
7558 tcg_gen_add_i32(addr
, addr
, tmp
);
7559 tcg_temp_free_i32(tmp
);
7560 tmp
= gen_ld16u(addr
, IS_USER(s
));
7562 tcg_temp_free_i32(tmp
);
7563 tmp
= gen_ld8u(addr
, IS_USER(s
));
7565 tcg_temp_free_i32(addr
);
7566 tcg_gen_shli_i32(tmp
, tmp
, 1);
7567 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7568 store_reg(s
, 15, tmp
);
7570 /* Load/store exclusive byte/halfword/doubleword. */
7572 op
= (insn
>> 4) & 0x3;
7576 addr
= tcg_temp_local_new();
7577 load_reg_var(s
, addr
, rn
);
7578 if (insn
& (1 << 20)) {
7579 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7581 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7583 tcg_temp_free(addr
);
7586 /* Load/store multiple, RFE, SRS. */
7587 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7588 /* Not available in user mode. */
7591 if (insn
& (1 << 20)) {
7593 addr
= load_reg(s
, rn
);
7594 if ((insn
& (1 << 24)) == 0)
7595 tcg_gen_addi_i32(addr
, addr
, -8);
7596 /* Load PC into tmp and CPSR into tmp2. */
7597 tmp
= gen_ld32(addr
, 0);
7598 tcg_gen_addi_i32(addr
, addr
, 4);
7599 tmp2
= gen_ld32(addr
, 0);
7600 if (insn
& (1 << 21)) {
7601 /* Base writeback. */
7602 if (insn
& (1 << 24)) {
7603 tcg_gen_addi_i32(addr
, addr
, 4);
7605 tcg_gen_addi_i32(addr
, addr
, -4);
7607 store_reg(s
, rn
, addr
);
7609 tcg_temp_free_i32(addr
);
7611 gen_rfe(s
, tmp
, tmp2
);
7615 addr
= tcg_temp_new_i32();
7616 tmp
= tcg_const_i32(op
);
7617 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7618 tcg_temp_free_i32(tmp
);
7619 if ((insn
& (1 << 24)) == 0) {
7620 tcg_gen_addi_i32(addr
, addr
, -8);
7622 tmp
= load_reg(s
, 14);
7623 gen_st32(tmp
, addr
, 0);
7624 tcg_gen_addi_i32(addr
, addr
, 4);
7625 tmp
= tcg_temp_new_i32();
7626 gen_helper_cpsr_read(tmp
);
7627 gen_st32(tmp
, addr
, 0);
7628 if (insn
& (1 << 21)) {
7629 if ((insn
& (1 << 24)) == 0) {
7630 tcg_gen_addi_i32(addr
, addr
, -4);
7632 tcg_gen_addi_i32(addr
, addr
, 4);
7634 tmp
= tcg_const_i32(op
);
7635 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7636 tcg_temp_free_i32(tmp
);
7638 tcg_temp_free_i32(addr
);
7643 /* Load/store multiple. */
7644 addr
= load_reg(s
, rn
);
7646 for (i
= 0; i
< 16; i
++) {
7647 if (insn
& (1 << i
))
7650 if (insn
& (1 << 24)) {
7651 tcg_gen_addi_i32(addr
, addr
, -offset
);
7654 for (i
= 0; i
< 16; i
++) {
7655 if ((insn
& (1 << i
)) == 0)
7657 if (insn
& (1 << 20)) {
7659 tmp
= gen_ld32(addr
, IS_USER(s
));
7663 store_reg(s
, i
, tmp
);
7667 tmp
= load_reg(s
, i
);
7668 gen_st32(tmp
, addr
, IS_USER(s
));
7670 tcg_gen_addi_i32(addr
, addr
, 4);
7672 if (insn
& (1 << 21)) {
7673 /* Base register writeback. */
7674 if (insn
& (1 << 24)) {
7675 tcg_gen_addi_i32(addr
, addr
, -offset
);
7677 /* Fault if writeback register is in register list. */
7678 if (insn
& (1 << rn
))
7680 store_reg(s
, rn
, addr
);
7682 tcg_temp_free_i32(addr
);
7689 op
= (insn
>> 21) & 0xf;
7691 /* Halfword pack. */
7692 tmp
= load_reg(s
, rn
);
7693 tmp2
= load_reg(s
, rm
);
7694 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
7695 if (insn
& (1 << 5)) {
7699 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7700 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7701 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7705 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7706 tcg_gen_ext16u_i32(tmp
, tmp
);
7707 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7709 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7710 tcg_temp_free_i32(tmp2
);
7711 store_reg(s
, rd
, tmp
);
7713 /* Data processing register constant shift. */
7715 tmp
= tcg_temp_new_i32();
7716 tcg_gen_movi_i32(tmp
, 0);
7718 tmp
= load_reg(s
, rn
);
7720 tmp2
= load_reg(s
, rm
);
7722 shiftop
= (insn
>> 4) & 3;
7723 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7724 conds
= (insn
& (1 << 20)) != 0;
7725 logic_cc
= (conds
&& thumb2_logic_op(op
));
7726 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7727 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
7729 tcg_temp_free_i32(tmp2
);
7731 store_reg(s
, rd
, tmp
);
7733 tcg_temp_free_i32(tmp
);
7737 case 13: /* Misc data processing. */
7738 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7739 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7742 case 0: /* Register controlled shift. */
7743 tmp
= load_reg(s
, rn
);
7744 tmp2
= load_reg(s
, rm
);
7745 if ((insn
& 0x70) != 0)
7747 op
= (insn
>> 21) & 3;
7748 logic_cc
= (insn
& (1 << 20)) != 0;
7749 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7752 store_reg_bx(env
, s
, rd
, tmp
);
7754 case 1: /* Sign/zero extend. */
7755 tmp
= load_reg(s
, rm
);
7756 shift
= (insn
>> 4) & 3;
7757 /* ??? In many cases it's not neccessary to do a
7758 rotate, a shift is sufficient. */
7760 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7761 op
= (insn
>> 20) & 7;
7763 case 0: gen_sxth(tmp
); break;
7764 case 1: gen_uxth(tmp
); break;
7765 case 2: gen_sxtb16(tmp
); break;
7766 case 3: gen_uxtb16(tmp
); break;
7767 case 4: gen_sxtb(tmp
); break;
7768 case 5: gen_uxtb(tmp
); break;
7769 default: goto illegal_op
;
7772 tmp2
= load_reg(s
, rn
);
7773 if ((op
>> 1) == 1) {
7774 gen_add16(tmp
, tmp2
);
7776 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7777 tcg_temp_free_i32(tmp2
);
7780 store_reg(s
, rd
, tmp
);
7782 case 2: /* SIMD add/subtract. */
7783 op
= (insn
>> 20) & 7;
7784 shift
= (insn
>> 4) & 7;
7785 if ((op
& 3) == 3 || (shift
& 3) == 3)
7787 tmp
= load_reg(s
, rn
);
7788 tmp2
= load_reg(s
, rm
);
7789 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7790 tcg_temp_free_i32(tmp2
);
7791 store_reg(s
, rd
, tmp
);
7793 case 3: /* Other data processing. */
7794 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7796 /* Saturating add/subtract. */
7797 tmp
= load_reg(s
, rn
);
7798 tmp2
= load_reg(s
, rm
);
7800 gen_helper_double_saturate(tmp
, tmp
);
7802 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7804 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7805 tcg_temp_free_i32(tmp2
);
7807 tmp
= load_reg(s
, rn
);
7809 case 0x0a: /* rbit */
7810 gen_helper_rbit(tmp
, tmp
);
7812 case 0x08: /* rev */
7813 tcg_gen_bswap32_i32(tmp
, tmp
);
7815 case 0x09: /* rev16 */
7818 case 0x0b: /* revsh */
7821 case 0x10: /* sel */
7822 tmp2
= load_reg(s
, rm
);
7823 tmp3
= tcg_temp_new_i32();
7824 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7825 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7826 tcg_temp_free_i32(tmp3
);
7827 tcg_temp_free_i32(tmp2
);
7829 case 0x18: /* clz */
7830 gen_helper_clz(tmp
, tmp
);
7836 store_reg(s
, rd
, tmp
);
7838 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7839 op
= (insn
>> 4) & 0xf;
7840 tmp
= load_reg(s
, rn
);
7841 tmp2
= load_reg(s
, rm
);
7842 switch ((insn
>> 20) & 7) {
7843 case 0: /* 32 x 32 -> 32 */
7844 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7845 tcg_temp_free_i32(tmp2
);
7847 tmp2
= load_reg(s
, rs
);
7849 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7851 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7852 tcg_temp_free_i32(tmp2
);
7855 case 1: /* 16 x 16 -> 32 */
7856 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7857 tcg_temp_free_i32(tmp2
);
7859 tmp2
= load_reg(s
, rs
);
7860 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7861 tcg_temp_free_i32(tmp2
);
7864 case 2: /* Dual multiply add. */
7865 case 4: /* Dual multiply subtract. */
7867 gen_swap_half(tmp2
);
7868 gen_smul_dual(tmp
, tmp2
);
7869 if (insn
& (1 << 22)) {
7870 /* This subtraction cannot overflow. */
7871 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7873 /* This addition cannot overflow 32 bits;
7874 * however it may overflow considered as a signed
7875 * operation, in which case we must set the Q flag.
7877 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7879 tcg_temp_free_i32(tmp2
);
7882 tmp2
= load_reg(s
, rs
);
7883 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7884 tcg_temp_free_i32(tmp2
);
7887 case 3: /* 32 * 16 -> 32msb */
7889 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7892 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7893 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7894 tmp
= tcg_temp_new_i32();
7895 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7896 tcg_temp_free_i64(tmp64
);
7899 tmp2
= load_reg(s
, rs
);
7900 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7901 tcg_temp_free_i32(tmp2
);
7904 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7905 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7907 tmp
= load_reg(s
, rs
);
7908 if (insn
& (1 << 20)) {
7909 tmp64
= gen_addq_msw(tmp64
, tmp
);
7911 tmp64
= gen_subq_msw(tmp64
, tmp
);
7914 if (insn
& (1 << 4)) {
7915 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7917 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7918 tmp
= tcg_temp_new_i32();
7919 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7920 tcg_temp_free_i64(tmp64
);
7922 case 7: /* Unsigned sum of absolute differences. */
7923 gen_helper_usad8(tmp
, tmp
, tmp2
);
7924 tcg_temp_free_i32(tmp2
);
7926 tmp2
= load_reg(s
, rs
);
7927 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7928 tcg_temp_free_i32(tmp2
);
7932 store_reg(s
, rd
, tmp
);
7934 case 6: case 7: /* 64-bit multiply, Divide. */
7935 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7936 tmp
= load_reg(s
, rn
);
7937 tmp2
= load_reg(s
, rm
);
7938 if ((op
& 0x50) == 0x10) {
7940 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7943 gen_helper_udiv(tmp
, tmp
, tmp2
);
7945 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7946 tcg_temp_free_i32(tmp2
);
7947 store_reg(s
, rd
, tmp
);
7948 } else if ((op
& 0xe) == 0xc) {
7949 /* Dual multiply accumulate long. */
7951 gen_swap_half(tmp2
);
7952 gen_smul_dual(tmp
, tmp2
);
7954 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7956 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7958 tcg_temp_free_i32(tmp2
);
7960 tmp64
= tcg_temp_new_i64();
7961 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7962 tcg_temp_free_i32(tmp
);
7963 gen_addq(s
, tmp64
, rs
, rd
);
7964 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7965 tcg_temp_free_i64(tmp64
);
7968 /* Unsigned 64-bit multiply */
7969 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7973 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7974 tcg_temp_free_i32(tmp2
);
7975 tmp64
= tcg_temp_new_i64();
7976 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7977 tcg_temp_free_i32(tmp
);
7979 /* Signed 64-bit multiply */
7980 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7985 gen_addq_lo(s
, tmp64
, rs
);
7986 gen_addq_lo(s
, tmp64
, rd
);
7987 } else if (op
& 0x40) {
7988 /* 64-bit accumulate. */
7989 gen_addq(s
, tmp64
, rs
, rd
);
7991 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7992 tcg_temp_free_i64(tmp64
);
7997 case 6: case 7: case 14: case 15:
7999 if (((insn
>> 24) & 3) == 3) {
8000 /* Translate into the equivalent ARM encoding. */
8001 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
8002 if (disas_neon_data_insn(env
, s
, insn
))
8005 if (insn
& (1 << 28))
8007 if (disas_coproc_insn (env
, s
, insn
))
8011 case 8: case 9: case 10: case 11:
8012 if (insn
& (1 << 15)) {
8013 /* Branches, misc control. */
8014 if (insn
& 0x5000) {
8015 /* Unconditional branch. */
8016 /* signextend(hw1[10:0]) -> offset[:12]. */
8017 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
8018 /* hw1[10:0] -> offset[11:1]. */
8019 offset
|= (insn
& 0x7ff) << 1;
8020 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8021 offset[24:22] already have the same value because of the
8022 sign extension above. */
8023 offset
^= ((~insn
) & (1 << 13)) << 10;
8024 offset
^= ((~insn
) & (1 << 11)) << 11;
8026 if (insn
& (1 << 14)) {
8027 /* Branch and link. */
8028 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
8032 if (insn
& (1 << 12)) {
8037 offset
&= ~(uint32_t)2;
8038 gen_bx_im(s
, offset
);
8040 } else if (((insn
>> 23) & 7) == 7) {
8042 if (insn
& (1 << 13))
8045 if (insn
& (1 << 26)) {
8046 /* Secure monitor call (v6Z) */
8047 goto illegal_op
; /* not implemented. */
8049 op
= (insn
>> 20) & 7;
8051 case 0: /* msr cpsr. */
8053 tmp
= load_reg(s
, rn
);
8054 addr
= tcg_const_i32(insn
& 0xff);
8055 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8056 tcg_temp_free_i32(addr
);
8057 tcg_temp_free_i32(tmp
);
8062 case 1: /* msr spsr. */
8065 tmp
= load_reg(s
, rn
);
8067 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8071 case 2: /* cps, nop-hint. */
8072 if (((insn
>> 8) & 7) == 0) {
8073 gen_nop_hint(s
, insn
& 0xff);
8075 /* Implemented as NOP in user mode. */
8080 if (insn
& (1 << 10)) {
8081 if (insn
& (1 << 7))
8083 if (insn
& (1 << 6))
8085 if (insn
& (1 << 5))
8087 if (insn
& (1 << 9))
8088 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8090 if (insn
& (1 << 8)) {
8092 imm
|= (insn
& 0x1f);
8095 gen_set_psr_im(s
, offset
, 0, imm
);
                case 3: /* Special control operations.  */
                    op = (insn >> 4) & 0xf;
                    /* These execute as NOPs.  */
                    /* Trivial implementation equivalent to bx.  */
                    tmp = load_reg(s, rn);
                case 5: /* Exception return.  */
                    if (rn != 14 || rd != 15) {
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                case 6: /* mrs cpsr.  */
                    tmp = tcg_temp_new_i32();
                    addr = tcg_const_i32(insn & 0xff);
                    gen_helper_v7m_mrs(tmp, cpu_env, addr);
                    tcg_temp_free_i32(addr);
                    gen_helper_cpsr_read(tmp);
                    store_reg(s, rd, tmp);
                case 7: /* mrs spsr.  */
                    /* Not accessible in user mode.  */
                    if (IS_USER(s) || IS_M(env))
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                        tmp = load_reg(s, rn);
                    case 2: /* Signed bitfield extract.  */
                        if (shift + imm > 32)
                        gen_sbfx(tmp, shift, imm);
                    case 6: /* Unsigned bitfield extract.  */
                        if (shift + imm > 32)
                        gen_ubfx(tmp, shift, (1u << imm) - 1);
                    case 3: /* Bitfield insert/clear.  */
                        imm = imm + 1 - shift;
                        tmp2 = load_reg(s, rd);
                        gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                        tcg_temp_free_i32(tmp2);
                    default: /* Saturate.  */
                            tcg_gen_sari_i32(tmp, tmp, shift);
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        tmp2 = tcg_const_i32(imm);
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                                gen_helper_usat(tmp, tmp, tmp2);
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                                gen_helper_ssat(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        /* Add/sub 12-bit immediate.  */
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                                tcg_gen_addi_i32(tmp, tmp, imm);
                    store_reg(s, rd, tmp);
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                    /* Nothing to do.  */
                case 1: /* 00XY00XY */
                case 2: /* XY00XY00 */
                case 3: /* XYXYXYXY */
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm = imm << (32 - shift);
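                    /* imm now holds the decoded Thumb-2 "modified immediate":
                       an 8-bit value either replicated across the word
                       (cases 1-3 above) or rotated into position. */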
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                    store_reg(s, rd, tmp);
                    tcg_temp_free_i32(tmp);
    case 12: /* Load/store single data item.  */
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (!(insn & (1 << 20))) {
            /* Byte or halfword load space with dest == r15 : memory hints.
             * Catch them early so we don't emit pointless addressing code.
             * This space is a mix of:
             *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
             *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
             *  unallocated hints, which must be treated as NOPs
             *  UNPREDICTABLE space, which we NOP or UNDEF depending on
             *     which is easiest for the decoding logic
             *  Some space which must UNDEF
             */
            int op1 = (insn >> 23) & 3;
            int op2 = (insn >> 6) & 0x3f;
                /* UNPREDICTABLE or unallocated hint */
                return 0; /* PLD* or unallocated hint */
            if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                return 0; /* PLD* or unallocated hint */
            /* UNDEF space, or an UNPREDICTABLE */
        addr = tcg_temp_new_i32();
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                tcg_gen_addi_i32(addr, addr, imm);
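                /* For the remaining forms, bits [11:8] of the insn select the
                   addressing mode handled by the switch below. */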
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                        tcg_temp_free_i32(addr);
                    tmp = load_reg(s, rm);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                case 0x9: /* Post-decrement.  */
                case 0xb: /* Post-increment.  */
                case 0xd: /* Pre-decrement.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    tcg_temp_free_i32(addr);
        if (insn & (1 << 20)) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
                tcg_temp_free_i32(addr);
                store_reg(s, rs, tmp);
            tmp = load_reg(s, rs);
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
                tcg_temp_free_i32(addr);
            tcg_gen_addi_i32(addr, addr, imm);
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
static void disas_thumb_insn(CPUState *env, DisasContext *s)
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
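    /* Decode one 16-bit Thumb instruction; the top four bits of the opcode
       select the major instruction group in the switch below. */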
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
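            /* Inside an IT block: branch over the code generated for this
               insn when its condition fails. */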
    insn = lduw_code(s->pc);
    switch (insn >> 12) {
        op = (insn >> 11) & 3;
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    gen_helper_add_cc(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
        /* data processing register */
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        tmp2 = load_reg(s, rm);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
                gen_helper_shl_cc(tmp2, tmp2, tmp);
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
                gen_helper_shr_cc(tmp2, tmp2, tmp);
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
                gen_helper_sar_cc(tmp2, tmp2, tmp);
            if (s->condexec_mask)
                gen_helper_adc_cc(tmp, tmp, tmp2);
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
                gen_helper_ror_cc(tmp2, tmp2, tmp);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
                gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_helper_add_cc(tmp, tmp, tmp2);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
            store_reg(s, rm, tmp2);
            tcg_temp_free_i32(tmp);
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        /* load/store register offset.  */
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);
        if (op < 3) /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
            gen_st16(tmp, addr, IS_USER(s));
            gen_st8(tmp, addr, IS_USER(s));
            tmp = gen_ld8s(addr, IS_USER(s));
            tmp = gen_ld32(addr, IS_USER(s));
            tmp = gen_ld16u(addr, IS_USER(s));
            tmp = gen_ld8u(addr, IS_USER(s));
            tmp = gen_ld16s(addr, IS_USER(s));
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        /* load/store word immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(addr);
        /* load/store byte immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(addr);
        /* load/store halfword immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(addr);
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(addr);
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            tmp = load_reg(s, 13);
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        op = (insn >> 8) & 0xf;
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
        case 2: /* sign/zero extend.  */
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            store_reg(s, rd, tmp);
        case 4: case 5: case 0xc: case 0xd:
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                tcg_gen_addi_i32(addr, addr, 4);
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
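            /* offset = (i:imm5) * 2; the displacement is zero-extended, so
               CBZ/CBNZ can only branch forwards. */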
            val = (uint32_t)s->pc + 2;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            store_reg(s, rd, tmp);
            tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                addr = tcg_const_i32(16);
                gen_helper_v7m_msr(cpu_env, addr, tmp);
                tcg_temp_free_i32(addr);
                addr = tcg_const_i32(17);
                gen_helper_v7m_msr(cpu_env, addr, tmp);
                tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            if (insn & (1 << 4))
                shift = CPSR_A | CPSR_I | CPSR_F;
            gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        if (disas_thumb2_insn(env, s, insn))
    gen_exception_insn(s, 4, EXCP_UDEF);
    gen_exception_insn(s, 2, EXCP_UDEF);
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;
    uint32_t next_page_start;
    /* generate intermediate code */
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    dc->is_jmp = DISAS_NEXT;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    max_insns = tb->cflags & CF_COUNT_MASK;
        max_insns = CF_COUNT_MASK;
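        /* A zero count in cflags means "no limit", so use the maximum. */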
    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
     * the condexec bits.
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately.  This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    goto done_generating;
            j = gen_opc_ptr - gen_opc_buf;
                gen_opc_instr_start[lj++] = 0;
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
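            /* The IT state is recorded alongside the PC for each op so that
               gen_pc_load() can restore it if the TB aborts mid-way (see the
               condexec note above). */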
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
            disas_thumb_insn(env, dc);
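            /* Advance the IT-block state: the mask is shifted left after each
               predicated insn and the condition is cleared once the block is
               exhausted. */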
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                  | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
            disas_arm_insn(env, dc);
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(env, "IO on conditional branch instruction");
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
                gen_exception(EXCP_DEBUG);
            gen_set_label(dc->condlabel);
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
            /* FIXME: Single stepping a WFI insn will not halt the CPU.  */
            gen_exception(EXCP_DEBUG);
    /* While branches must always occur at the end of an IT block,
       there are a few other things that can cause us to terminate
       the TB in the middle of an IT block:
       - Exception generating instructions (bkpt, swi, undefined).
       - Hardware watchpoints.
       Hardware breakpoints have already been handled and skip this code. */
    gen_set_condexec(dc);
    switch(dc->is_jmp) {
        gen_goto_tb(dc, 1, dc->pc);
        /* indicate that the hash table must be used to find the next TB */
        /* nothing more to generate */
        gen_exception(EXCP_SWI);
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_goto_tb(dc, 1, dc->pc);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        j = gen_opc_ptr - gen_opc_buf;
            gen_opc_instr_start[lj++] = 0;
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 0);

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 1);
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
            cpu_fprintf(f, "\n");
            cpu_fprintf(f, " ");
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
        for (i = 0; i < 16; i++) {
            d.d = env->vfp.regs[i];
            cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                        i * 2, (int)s0.i, s0.s,
                        i * 2 + 1, (int)s1.i, s1.s,
                        i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
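/* Restore the guest PC and the IT (condexec) bits for the op at pc_pos,
   using the tables filled in above, after an unexpected exit from a TB. */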
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];