/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
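/* ARCH(x) aborts decoding of the current instruction as UNDEFINED when the
 * emulated CPU lacks the named architecture feature; it assumes the decoder
 * that uses it provides an illegal_op label to jump to. */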
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
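/* Typical usage: a CPUState flag word such as CF is pulled into a fresh
 * temporary with load_cpu_field(CF) and written back with
 * store_cpu_field(tmp, CF); the store consumes (frees) the temporary. */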
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        /* Sign-extend from bit (width - 1): XORing in the sign bit and then
           subtracting it maps the unsigned field onto the signed range.  */
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
        break;
    }
}
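/* Note: in the ARM immediate shift encoding a shift amount of zero does not
   always mean "no shift": it encodes LSR #32, ASR #32 and RRX, which is why
   the cases above special-case shift == 0. */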
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
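/* The PAS_OP() macros below expand to a switch on op2 that picks the
   add16/addsubx/subaddx/sub16/add8/sub8 helper for a given prefix; the
   plain signed and unsigned forms also pass a pointer to the GE flags so
   the helpers can record per-lane carry/borrow results. */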
506 #define PAS_OP(pfx) \
508 case 0: gen_pas_helper(glue(pfx,add16)); break; \
509 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
510 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
511 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
512 case 4: gen_pas_helper(glue(pfx,add8)); break; \
513 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
515 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
520 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
522 tmp
= tcg_temp_new_ptr();
523 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
525 tcg_temp_free_ptr(tmp
);
528 tmp
= tcg_temp_new_ptr();
529 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
531 tcg_temp_free_ptr(tmp
);
533 #undef gen_pas_helper
534 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
547 #undef gen_pas_helper
552 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
553 #define PAS_OP(pfx) \
555 case 0: gen_pas_helper(glue(pfx,add8)); break; \
556 case 1: gen_pas_helper(glue(pfx,add16)); break; \
557 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
558 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
559 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
560 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
562 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
567 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
569 tmp
= tcg_temp_new_ptr();
570 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
572 tcg_temp_free_ptr(tmp
);
575 tmp
= tcg_temp_new_ptr();
576 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
578 tcg_temp_free_ptr(tmp
);
580 #undef gen_pas_helper
581 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
594 #undef gen_pas_helper
599 static void gen_test_cc(int cc
, int label
)
607 tmp
= load_cpu_field(ZF
);
608 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
611 tmp
= load_cpu_field(ZF
);
612 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
615 tmp
= load_cpu_field(CF
);
616 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
619 tmp
= load_cpu_field(CF
);
620 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
623 tmp
= load_cpu_field(NF
);
624 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
627 tmp
= load_cpu_field(NF
);
628 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
631 tmp
= load_cpu_field(VF
);
632 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
635 tmp
= load_cpu_field(VF
);
636 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
638 case 8: /* hi: C && !Z */
639 inv
= gen_new_label();
640 tmp
= load_cpu_field(CF
);
641 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
642 tcg_temp_free_i32(tmp
);
643 tmp
= load_cpu_field(ZF
);
644 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
647 case 9: /* ls: !C || Z */
648 tmp
= load_cpu_field(CF
);
649 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
650 tcg_temp_free_i32(tmp
);
651 tmp
= load_cpu_field(ZF
);
652 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
654 case 10: /* ge: N == V -> N ^ V == 0 */
655 tmp
= load_cpu_field(VF
);
656 tmp2
= load_cpu_field(NF
);
657 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
658 tcg_temp_free_i32(tmp2
);
659 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
661 case 11: /* lt: N != V -> N ^ V != 0 */
662 tmp
= load_cpu_field(VF
);
663 tmp2
= load_cpu_field(NF
);
664 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
665 tcg_temp_free_i32(tmp2
);
666 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
668 case 12: /* gt: !Z && N == V */
669 inv
= gen_new_label();
670 tmp
= load_cpu_field(ZF
);
671 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
672 tcg_temp_free_i32(tmp
);
673 tmp
= load_cpu_field(VF
);
674 tmp2
= load_cpu_field(NF
);
675 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
676 tcg_temp_free_i32(tmp2
);
677 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
680 case 13: /* le: Z || N != V */
681 tmp
= load_cpu_field(ZF
);
682 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
683 tcg_temp_free_i32(tmp
);
684 tmp
= load_cpu_field(VF
);
685 tmp2
= load_cpu_field(NF
);
686 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
687 tcg_temp_free_i32(tmp2
);
688 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
691 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
694 tcg_temp_free_i32(tmp
);
697 static const uint8_t table_logic_cc
[16] = {
716 /* Set PC and Thumb state from an immediate address. */
717 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
721 s
->is_jmp
= DISAS_UPDATE
;
722 if (s
->thumb
!= (addr
& 1)) {
723 tmp
= tcg_temp_new_i32();
724 tcg_gen_movi_i32(tmp
, addr
& 1);
725 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, thumb
));
726 tcg_temp_free_i32(tmp
);
728 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
731 /* Set PC and Thumb state from var. var is marked as dead. */
732 static inline void gen_bx(DisasContext
*s
, TCGv var
)
734 s
->is_jmp
= DISAS_UPDATE
;
735 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
736 tcg_gen_andi_i32(var
, var
, 1);
737 store_cpu_field(var
, thumb
);
740 /* Variant of store_reg which uses branch&exchange logic when storing
741 to r15 in ARM architecture v7 and above. The source must be a temporary
742 and will be marked as dead. */
743 static inline void store_reg_bx(CPUState
*env
, DisasContext
*s
,
746 if (reg
== 15 && ENABLE_ARCH_7
) {
749 store_reg(s
, reg
, var
);
753 static inline TCGv
gen_ld8s(TCGv addr
, int index
)
755 TCGv tmp
= tcg_temp_new_i32();
756 tcg_gen_qemu_ld8s(tmp
, addr
, index
);
759 static inline TCGv
gen_ld8u(TCGv addr
, int index
)
761 TCGv tmp
= tcg_temp_new_i32();
762 tcg_gen_qemu_ld8u(tmp
, addr
, index
);
765 static inline TCGv
gen_ld16s(TCGv addr
, int index
)
767 TCGv tmp
= tcg_temp_new_i32();
768 tcg_gen_qemu_ld16s(tmp
, addr
, index
);
771 static inline TCGv
gen_ld16u(TCGv addr
, int index
)
773 TCGv tmp
= tcg_temp_new_i32();
774 tcg_gen_qemu_ld16u(tmp
, addr
, index
);
777 static inline TCGv
gen_ld32(TCGv addr
, int index
)
779 TCGv tmp
= tcg_temp_new_i32();
780 tcg_gen_qemu_ld32u(tmp
, addr
, index
);
783 static inline TCGv_i64
gen_ld64(TCGv addr
, int index
)
785 TCGv_i64 tmp
= tcg_temp_new_i64();
786 tcg_gen_qemu_ld64(tmp
, addr
, index
);
789 static inline void gen_st8(TCGv val
, TCGv addr
, int index
)
791 tcg_gen_qemu_st8(val
, addr
, index
);
792 tcg_temp_free_i32(val
);
794 static inline void gen_st16(TCGv val
, TCGv addr
, int index
)
796 tcg_gen_qemu_st16(val
, addr
, index
);
797 tcg_temp_free_i32(val
);
799 static inline void gen_st32(TCGv val
, TCGv addr
, int index
)
801 tcg_gen_qemu_st32(val
, addr
, index
);
802 tcg_temp_free_i32(val
);
804 static inline void gen_st64(TCGv_i64 val
, TCGv addr
, int index
)
806 tcg_gen_qemu_st64(val
, addr
, index
);
807 tcg_temp_free_i64(val
);
810 static inline void gen_set_pc_im(uint32_t val
)
812 tcg_gen_movi_i32(cpu_R
[15], val
);
815 /* Force a TB lookup after an instruction that changes the CPU state. */
816 static inline void gen_lookup_tb(DisasContext
*s
)
818 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
819 s
->is_jmp
= DISAS_UPDATE
;
822 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
825 int val
, rm
, shift
, shiftop
;
828 if (!(insn
& (1 << 25))) {
831 if (!(insn
& (1 << 23)))
834 tcg_gen_addi_i32(var
, var
, val
);
838 shift
= (insn
>> 7) & 0x1f;
839 shiftop
= (insn
>> 5) & 3;
840 offset
= load_reg(s
, rm
);
841 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
842 if (!(insn
& (1 << 23)))
843 tcg_gen_sub_i32(var
, var
, offset
);
845 tcg_gen_add_i32(var
, var
, offset
);
846 tcg_temp_free_i32(offset
);
850 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
856 if (insn
& (1 << 22)) {
858 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
859 if (!(insn
& (1 << 23)))
863 tcg_gen_addi_i32(var
, var
, val
);
867 tcg_gen_addi_i32(var
, var
, extra
);
869 offset
= load_reg(s
, rm
);
870 if (!(insn
& (1 << 23)))
871 tcg_gen_sub_i32(var
, var
, offset
);
873 tcg_gen_add_i32(var
, var
, offset
);
874 tcg_temp_free_i32(offset
);
878 #define VFP_OP2(name) \
879 static inline void gen_vfp_##name(int dp) \
882 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
884 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
894 static inline void gen_vfp_abs(int dp
)
897 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
899 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
902 static inline void gen_vfp_neg(int dp
)
905 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
907 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
910 static inline void gen_vfp_sqrt(int dp
)
913 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
915 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
918 static inline void gen_vfp_cmp(int dp
)
921 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
923 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
926 static inline void gen_vfp_cmpe(int dp
)
929 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
931 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
934 static inline void gen_vfp_F1_ld0(int dp
)
937 tcg_gen_movi_i64(cpu_F1d
, 0);
939 tcg_gen_movi_i32(cpu_F1s
, 0);
942 static inline void gen_vfp_uito(int dp
)
945 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
947 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
950 static inline void gen_vfp_sito(int dp
)
953 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
955 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
958 static inline void gen_vfp_toui(int dp
)
961 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
963 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
966 static inline void gen_vfp_touiz(int dp
)
969 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
971 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
974 static inline void gen_vfp_tosi(int dp
)
977 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
979 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
982 static inline void gen_vfp_tosiz(int dp
)
985 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
987 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
990 #define VFP_GEN_FIX(name) \
991 static inline void gen_vfp_##name(int dp, int shift) \
993 TCGv tmp_shift = tcg_const_i32(shift); \
995 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
997 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
998 tcg_temp_free_i32(tmp_shift); \
1010 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv addr
)
1013 tcg_gen_qemu_ld64(cpu_F0d
, addr
, IS_USER(s
));
1015 tcg_gen_qemu_ld32u(cpu_F0s
, addr
, IS_USER(s
));
1018 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv addr
)
1021 tcg_gen_qemu_st64(cpu_F0d
, addr
, IS_USER(s
));
1023 tcg_gen_qemu_st32(cpu_F0s
, addr
, IS_USER(s
));
1027 vfp_reg_offset (int dp
, int reg
)
1030 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1032 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1033 + offsetof(CPU_DoubleU
, l
.upper
);
1035 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1036 + offsetof(CPU_DoubleU
, l
.lower
);
1040 /* Return the offset of a 32-bit piece of a NEON register.
1041 zero is the least significant end of the register. */
1043 neon_reg_offset (int reg
, int n
)
1047 return vfp_reg_offset(0, sreg
);
1050 static TCGv
neon_load_reg(int reg
, int pass
)
1052 TCGv tmp
= tcg_temp_new_i32();
1053 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1057 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1059 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1060 tcg_temp_free_i32(var
);
1063 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1065 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1068 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1070 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1073 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1074 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1075 #define tcg_gen_st_f32 tcg_gen_st_i32
1076 #define tcg_gen_st_f64 tcg_gen_st_i64
1078 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1081 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1083 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1086 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1089 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1091 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1094 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1097 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1099 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
#define ARM_CP_RW_BIT (1 << 20)
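/* Bit 20 of a coprocessor instruction encodes the transfer direction:
   when set the instruction moves data from the coprocessor to the ARM
   core (MRC/MRRC/loads), when clear the other way (MCR/MCRR/stores). */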
1104 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1106 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1109 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1111 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1114 static inline TCGv
iwmmxt_load_creg(int reg
)
1116 TCGv var
= tcg_temp_new_i32();
1117 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1121 static inline void iwmmxt_store_creg(int reg
, TCGv var
)
1123 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1124 tcg_temp_free_i32(var
);
1127 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1129 iwmmxt_store_reg(cpu_M0
, rn
);
1132 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1134 iwmmxt_load_reg(cpu_M0
, rn
);
1137 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1139 iwmmxt_load_reg(cpu_V1
, rn
);
1140 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1143 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1145 iwmmxt_load_reg(cpu_V1
, rn
);
1146 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1149 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1151 iwmmxt_load_reg(cpu_V1
, rn
);
1152 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1155 #define IWMMXT_OP(name) \
1156 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1158 iwmmxt_load_reg(cpu_V1, rn); \
1159 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1162 #define IWMMXT_OP_ENV(name) \
1163 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1165 iwmmxt_load_reg(cpu_V1, rn); \
1166 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1169 #define IWMMXT_OP_ENV_SIZE(name) \
1170 IWMMXT_OP_ENV(name##b) \
1171 IWMMXT_OP_ENV(name##w) \
1172 IWMMXT_OP_ENV(name##l)
1174 #define IWMMXT_OP_ENV1(name) \
1175 static inline void gen_op_iwmmxt_##name##_M0(void) \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1191 IWMMXT_OP_ENV_SIZE(unpackl
)
1192 IWMMXT_OP_ENV_SIZE(unpackh
)
1194 IWMMXT_OP_ENV1(unpacklub
)
1195 IWMMXT_OP_ENV1(unpackluw
)
1196 IWMMXT_OP_ENV1(unpacklul
)
1197 IWMMXT_OP_ENV1(unpackhub
)
1198 IWMMXT_OP_ENV1(unpackhuw
)
1199 IWMMXT_OP_ENV1(unpackhul
)
1200 IWMMXT_OP_ENV1(unpacklsb
)
1201 IWMMXT_OP_ENV1(unpacklsw
)
1202 IWMMXT_OP_ENV1(unpacklsl
)
1203 IWMMXT_OP_ENV1(unpackhsb
)
1204 IWMMXT_OP_ENV1(unpackhsw
)
1205 IWMMXT_OP_ENV1(unpackhsl
)
1207 IWMMXT_OP_ENV_SIZE(cmpeq
)
1208 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1209 IWMMXT_OP_ENV_SIZE(cmpgts
)
1211 IWMMXT_OP_ENV_SIZE(mins
)
1212 IWMMXT_OP_ENV_SIZE(minu
)
1213 IWMMXT_OP_ENV_SIZE(maxs
)
1214 IWMMXT_OP_ENV_SIZE(maxu
)
1216 IWMMXT_OP_ENV_SIZE(subn
)
1217 IWMMXT_OP_ENV_SIZE(addn
)
1218 IWMMXT_OP_ENV_SIZE(subu
)
1219 IWMMXT_OP_ENV_SIZE(addu
)
1220 IWMMXT_OP_ENV_SIZE(subs
)
1221 IWMMXT_OP_ENV_SIZE(adds
)
1223 IWMMXT_OP_ENV(avgb0
)
1224 IWMMXT_OP_ENV(avgb1
)
1225 IWMMXT_OP_ENV(avgw0
)
1226 IWMMXT_OP_ENV(avgw1
)
1230 IWMMXT_OP_ENV(packuw
)
1231 IWMMXT_OP_ENV(packul
)
1232 IWMMXT_OP_ENV(packuq
)
1233 IWMMXT_OP_ENV(packsw
)
1234 IWMMXT_OP_ENV(packsl
)
1235 IWMMXT_OP_ENV(packsq
)
1237 static void gen_op_iwmmxt_set_mup(void)
1240 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1241 tcg_gen_ori_i32(tmp
, tmp
, 2);
1242 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1245 static void gen_op_iwmmxt_set_cup(void)
1248 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1249 tcg_gen_ori_i32(tmp
, tmp
, 1);
1250 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1253 static void gen_op_iwmmxt_setpsr_nz(void)
1255 TCGv tmp
= tcg_temp_new_i32();
1256 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1257 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1260 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1262 iwmmxt_load_reg(cpu_V1
, rn
);
1263 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1264 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1267 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
, TCGv dest
)
1273 rd
= (insn
>> 16) & 0xf;
1274 tmp
= load_reg(s
, rd
);
1276 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1277 if (insn
& (1 << 24)) {
1279 if (insn
& (1 << 23))
1280 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1282 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1283 tcg_gen_mov_i32(dest
, tmp
);
1284 if (insn
& (1 << 21))
1285 store_reg(s
, rd
, tmp
);
1287 tcg_temp_free_i32(tmp
);
1288 } else if (insn
& (1 << 21)) {
1290 tcg_gen_mov_i32(dest
, tmp
);
1291 if (insn
& (1 << 23))
1292 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1294 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1295 store_reg(s
, rd
, tmp
);
1296 } else if (!(insn
& (1 << 23)))
1301 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv dest
)
1303 int rd
= (insn
>> 0) & 0xf;
1306 if (insn
& (1 << 8)) {
1307 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1310 tmp
= iwmmxt_load_creg(rd
);
1313 tmp
= tcg_temp_new_i32();
1314 iwmmxt_load_reg(cpu_V0
, rd
);
1315 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
1317 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1318 tcg_gen_mov_i32(dest
, tmp
);
1319 tcg_temp_free_i32(tmp
);
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1325 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1328 int rdhi
, rdlo
, rd0
, rd1
, i
;
1330 TCGv tmp
, tmp2
, tmp3
;
1332 if ((insn
& 0x0e000e00) == 0x0c000000) {
1333 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1335 rdlo
= (insn
>> 12) & 0xf;
1336 rdhi
= (insn
>> 16) & 0xf;
1337 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1338 iwmmxt_load_reg(cpu_V0
, wrd
);
1339 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1340 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1341 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1342 } else { /* TMCRR */
1343 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1344 iwmmxt_store_reg(cpu_V0
, wrd
);
1345 gen_op_iwmmxt_set_mup();
1350 wrd
= (insn
>> 12) & 0xf;
1351 addr
= tcg_temp_new_i32();
1352 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1353 tcg_temp_free_i32(addr
);
1356 if (insn
& ARM_CP_RW_BIT
) {
1357 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1358 tmp
= tcg_temp_new_i32();
1359 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1360 iwmmxt_store_creg(wrd
, tmp
);
1363 if (insn
& (1 << 8)) {
1364 if (insn
& (1 << 22)) { /* WLDRD */
1365 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1367 } else { /* WLDRW wRd */
1368 tmp
= gen_ld32(addr
, IS_USER(s
));
1371 if (insn
& (1 << 22)) { /* WLDRH */
1372 tmp
= gen_ld16u(addr
, IS_USER(s
));
1373 } else { /* WLDRB */
1374 tmp
= gen_ld8u(addr
, IS_USER(s
));
1378 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1379 tcg_temp_free_i32(tmp
);
1381 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1384 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1385 tmp
= iwmmxt_load_creg(wrd
);
1386 gen_st32(tmp
, addr
, IS_USER(s
));
1388 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1389 tmp
= tcg_temp_new_i32();
1390 if (insn
& (1 << 8)) {
1391 if (insn
& (1 << 22)) { /* WSTRD */
1392 tcg_temp_free_i32(tmp
);
1393 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1394 } else { /* WSTRW wRd */
1395 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1396 gen_st32(tmp
, addr
, IS_USER(s
));
1399 if (insn
& (1 << 22)) { /* WSTRH */
1400 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1401 gen_st16(tmp
, addr
, IS_USER(s
));
1402 } else { /* WSTRB */
1403 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1404 gen_st8(tmp
, addr
, IS_USER(s
));
1409 tcg_temp_free_i32(addr
);
1413 if ((insn
& 0x0f000000) != 0x0e000000)
1416 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1417 case 0x000: /* WOR */
1418 wrd
= (insn
>> 12) & 0xf;
1419 rd0
= (insn
>> 0) & 0xf;
1420 rd1
= (insn
>> 16) & 0xf;
1421 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1422 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1423 gen_op_iwmmxt_setpsr_nz();
1424 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1425 gen_op_iwmmxt_set_mup();
1426 gen_op_iwmmxt_set_cup();
1428 case 0x011: /* TMCR */
1431 rd
= (insn
>> 12) & 0xf;
1432 wrd
= (insn
>> 16) & 0xf;
1434 case ARM_IWMMXT_wCID
:
1435 case ARM_IWMMXT_wCASF
:
1437 case ARM_IWMMXT_wCon
:
1438 gen_op_iwmmxt_set_cup();
1440 case ARM_IWMMXT_wCSSF
:
1441 tmp
= iwmmxt_load_creg(wrd
);
1442 tmp2
= load_reg(s
, rd
);
1443 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1444 tcg_temp_free_i32(tmp2
);
1445 iwmmxt_store_creg(wrd
, tmp
);
1447 case ARM_IWMMXT_wCGR0
:
1448 case ARM_IWMMXT_wCGR1
:
1449 case ARM_IWMMXT_wCGR2
:
1450 case ARM_IWMMXT_wCGR3
:
1451 gen_op_iwmmxt_set_cup();
1452 tmp
= load_reg(s
, rd
);
1453 iwmmxt_store_creg(wrd
, tmp
);
1459 case 0x100: /* WXOR */
1460 wrd
= (insn
>> 12) & 0xf;
1461 rd0
= (insn
>> 0) & 0xf;
1462 rd1
= (insn
>> 16) & 0xf;
1463 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1464 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1465 gen_op_iwmmxt_setpsr_nz();
1466 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1467 gen_op_iwmmxt_set_mup();
1468 gen_op_iwmmxt_set_cup();
1470 case 0x111: /* TMRC */
1473 rd
= (insn
>> 12) & 0xf;
1474 wrd
= (insn
>> 16) & 0xf;
1475 tmp
= iwmmxt_load_creg(wrd
);
1476 store_reg(s
, rd
, tmp
);
1478 case 0x300: /* WANDN */
1479 wrd
= (insn
>> 12) & 0xf;
1480 rd0
= (insn
>> 0) & 0xf;
1481 rd1
= (insn
>> 16) & 0xf;
1482 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1483 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1484 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1485 gen_op_iwmmxt_setpsr_nz();
1486 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1487 gen_op_iwmmxt_set_mup();
1488 gen_op_iwmmxt_set_cup();
1490 case 0x200: /* WAND */
1491 wrd
= (insn
>> 12) & 0xf;
1492 rd0
= (insn
>> 0) & 0xf;
1493 rd1
= (insn
>> 16) & 0xf;
1494 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1501 case 0x810: case 0xa10: /* WMADD */
1502 wrd
= (insn
>> 12) & 0xf;
1503 rd0
= (insn
>> 0) & 0xf;
1504 rd1
= (insn
>> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1506 if (insn
& (1 << 21))
1507 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1509 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1510 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1511 gen_op_iwmmxt_set_mup();
1513 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1514 wrd
= (insn
>> 12) & 0xf;
1515 rd0
= (insn
>> 16) & 0xf;
1516 rd1
= (insn
>> 0) & 0xf;
1517 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1518 switch ((insn
>> 22) & 3) {
1520 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1523 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1526 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1531 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1535 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1536 wrd
= (insn
>> 12) & 0xf;
1537 rd0
= (insn
>> 16) & 0xf;
1538 rd1
= (insn
>> 0) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1540 switch ((insn
>> 22) & 3) {
1542 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1545 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1548 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1553 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1554 gen_op_iwmmxt_set_mup();
1555 gen_op_iwmmxt_set_cup();
1557 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1558 wrd
= (insn
>> 12) & 0xf;
1559 rd0
= (insn
>> 16) & 0xf;
1560 rd1
= (insn
>> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1562 if (insn
& (1 << 22))
1563 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1565 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1566 if (!(insn
& (1 << 20)))
1567 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1568 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1569 gen_op_iwmmxt_set_mup();
1571 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1572 wrd
= (insn
>> 12) & 0xf;
1573 rd0
= (insn
>> 16) & 0xf;
1574 rd1
= (insn
>> 0) & 0xf;
1575 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1576 if (insn
& (1 << 21)) {
1577 if (insn
& (1 << 20))
1578 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1580 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1582 if (insn
& (1 << 20))
1583 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1585 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1587 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1588 gen_op_iwmmxt_set_mup();
1590 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1591 wrd
= (insn
>> 12) & 0xf;
1592 rd0
= (insn
>> 16) & 0xf;
1593 rd1
= (insn
>> 0) & 0xf;
1594 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1595 if (insn
& (1 << 21))
1596 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1598 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1599 if (!(insn
& (1 << 20))) {
1600 iwmmxt_load_reg(cpu_V1
, wrd
);
1601 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1603 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1604 gen_op_iwmmxt_set_mup();
1606 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1607 wrd
= (insn
>> 12) & 0xf;
1608 rd0
= (insn
>> 16) & 0xf;
1609 rd1
= (insn
>> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1611 switch ((insn
>> 22) & 3) {
1613 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1616 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1619 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1624 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1625 gen_op_iwmmxt_set_mup();
1626 gen_op_iwmmxt_set_cup();
1628 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1629 wrd
= (insn
>> 12) & 0xf;
1630 rd0
= (insn
>> 16) & 0xf;
1631 rd1
= (insn
>> 0) & 0xf;
1632 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1633 if (insn
& (1 << 22)) {
1634 if (insn
& (1 << 20))
1635 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1637 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1639 if (insn
& (1 << 20))
1640 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1642 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1644 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1645 gen_op_iwmmxt_set_mup();
1646 gen_op_iwmmxt_set_cup();
1648 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1649 wrd
= (insn
>> 12) & 0xf;
1650 rd0
= (insn
>> 16) & 0xf;
1651 rd1
= (insn
>> 0) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1653 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1654 tcg_gen_andi_i32(tmp
, tmp
, 7);
1655 iwmmxt_load_reg(cpu_V1
, rd1
);
1656 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1657 tcg_temp_free_i32(tmp
);
1658 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1659 gen_op_iwmmxt_set_mup();
1661 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1662 if (((insn
>> 6) & 3) == 3)
1664 rd
= (insn
>> 12) & 0xf;
1665 wrd
= (insn
>> 16) & 0xf;
1666 tmp
= load_reg(s
, rd
);
1667 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1668 switch ((insn
>> 6) & 3) {
1670 tmp2
= tcg_const_i32(0xff);
1671 tmp3
= tcg_const_i32((insn
& 7) << 3);
1674 tmp2
= tcg_const_i32(0xffff);
1675 tmp3
= tcg_const_i32((insn
& 3) << 4);
1678 tmp2
= tcg_const_i32(0xffffffff);
1679 tmp3
= tcg_const_i32((insn
& 1) << 5);
1685 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1686 tcg_temp_free(tmp3
);
1687 tcg_temp_free(tmp2
);
1688 tcg_temp_free_i32(tmp
);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1690 gen_op_iwmmxt_set_mup();
1692 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1693 rd
= (insn
>> 12) & 0xf;
1694 wrd
= (insn
>> 16) & 0xf;
1695 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1697 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1698 tmp
= tcg_temp_new_i32();
1699 switch ((insn
>> 22) & 3) {
1701 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1702 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1704 tcg_gen_ext8s_i32(tmp
, tmp
);
1706 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1710 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1711 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1713 tcg_gen_ext16s_i32(tmp
, tmp
);
1715 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1719 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1720 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1723 store_reg(s
, rd
, tmp
);
1725 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1726 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1728 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1729 switch ((insn
>> 22) & 3) {
1731 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1734 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1737 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1740 tcg_gen_shli_i32(tmp
, tmp
, 28);
1742 tcg_temp_free_i32(tmp
);
1744 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1745 if (((insn
>> 6) & 3) == 3)
1747 rd
= (insn
>> 12) & 0xf;
1748 wrd
= (insn
>> 16) & 0xf;
1749 tmp
= load_reg(s
, rd
);
1750 switch ((insn
>> 6) & 3) {
1752 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1755 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1758 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1761 tcg_temp_free_i32(tmp
);
1762 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1763 gen_op_iwmmxt_set_mup();
1765 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1766 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1768 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1769 tmp2
= tcg_temp_new_i32();
1770 tcg_gen_mov_i32(tmp2
, tmp
);
1771 switch ((insn
>> 22) & 3) {
1773 for (i
= 0; i
< 7; i
++) {
1774 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1775 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1779 for (i
= 0; i
< 3; i
++) {
1780 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1781 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1785 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1786 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1790 tcg_temp_free_i32(tmp2
);
1791 tcg_temp_free_i32(tmp
);
1793 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1794 wrd
= (insn
>> 12) & 0xf;
1795 rd0
= (insn
>> 16) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1797 switch ((insn
>> 22) & 3) {
1799 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1802 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1805 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1810 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1811 gen_op_iwmmxt_set_mup();
1813 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1814 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1816 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1817 tmp2
= tcg_temp_new_i32();
1818 tcg_gen_mov_i32(tmp2
, tmp
);
1819 switch ((insn
>> 22) & 3) {
1821 for (i
= 0; i
< 7; i
++) {
1822 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1823 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1827 for (i
= 0; i
< 3; i
++) {
1828 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1829 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1833 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1834 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1838 tcg_temp_free_i32(tmp2
);
1839 tcg_temp_free_i32(tmp
);
1841 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1842 rd
= (insn
>> 12) & 0xf;
1843 rd0
= (insn
>> 16) & 0xf;
1844 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1846 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1847 tmp
= tcg_temp_new_i32();
1848 switch ((insn
>> 22) & 3) {
1850 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1853 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1856 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1859 store_reg(s
, rd
, tmp
);
1861 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1862 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1863 wrd
= (insn
>> 12) & 0xf;
1864 rd0
= (insn
>> 16) & 0xf;
1865 rd1
= (insn
>> 0) & 0xf;
1866 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1867 switch ((insn
>> 22) & 3) {
1869 if (insn
& (1 << 21))
1870 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1872 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1875 if (insn
& (1 << 21))
1876 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1878 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1881 if (insn
& (1 << 21))
1882 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1884 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1889 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1890 gen_op_iwmmxt_set_mup();
1891 gen_op_iwmmxt_set_cup();
1893 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1894 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1895 wrd
= (insn
>> 12) & 0xf;
1896 rd0
= (insn
>> 16) & 0xf;
1897 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1898 switch ((insn
>> 22) & 3) {
1900 if (insn
& (1 << 21))
1901 gen_op_iwmmxt_unpacklsb_M0();
1903 gen_op_iwmmxt_unpacklub_M0();
1906 if (insn
& (1 << 21))
1907 gen_op_iwmmxt_unpacklsw_M0();
1909 gen_op_iwmmxt_unpackluw_M0();
1912 if (insn
& (1 << 21))
1913 gen_op_iwmmxt_unpacklsl_M0();
1915 gen_op_iwmmxt_unpacklul_M0();
1920 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1921 gen_op_iwmmxt_set_mup();
1922 gen_op_iwmmxt_set_cup();
1924 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1925 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1926 wrd
= (insn
>> 12) & 0xf;
1927 rd0
= (insn
>> 16) & 0xf;
1928 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1929 switch ((insn
>> 22) & 3) {
1931 if (insn
& (1 << 21))
1932 gen_op_iwmmxt_unpackhsb_M0();
1934 gen_op_iwmmxt_unpackhub_M0();
1937 if (insn
& (1 << 21))
1938 gen_op_iwmmxt_unpackhsw_M0();
1940 gen_op_iwmmxt_unpackhuw_M0();
1943 if (insn
& (1 << 21))
1944 gen_op_iwmmxt_unpackhsl_M0();
1946 gen_op_iwmmxt_unpackhul_M0();
1951 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1952 gen_op_iwmmxt_set_mup();
1953 gen_op_iwmmxt_set_cup();
1955 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1956 case 0x214: case 0x614: case 0xa14: case 0xe14:
1957 if (((insn
>> 22) & 3) == 0)
1959 wrd
= (insn
>> 12) & 0xf;
1960 rd0
= (insn
>> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1962 tmp
= tcg_temp_new_i32();
1963 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
1964 tcg_temp_free_i32(tmp
);
1967 switch ((insn
>> 22) & 3) {
1969 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1972 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1975 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1978 tcg_temp_free_i32(tmp
);
1979 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1980 gen_op_iwmmxt_set_mup();
1981 gen_op_iwmmxt_set_cup();
1983 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1984 case 0x014: case 0x414: case 0x814: case 0xc14:
1985 if (((insn
>> 22) & 3) == 0)
1987 wrd
= (insn
>> 12) & 0xf;
1988 rd0
= (insn
>> 16) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1990 tmp
= tcg_temp_new_i32();
1991 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
1992 tcg_temp_free_i32(tmp
);
1995 switch ((insn
>> 22) & 3) {
1997 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2000 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2003 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2006 tcg_temp_free_i32(tmp
);
2007 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2008 gen_op_iwmmxt_set_mup();
2009 gen_op_iwmmxt_set_cup();
2011 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2012 case 0x114: case 0x514: case 0x914: case 0xd14:
2013 if (((insn
>> 22) & 3) == 0)
2015 wrd
= (insn
>> 12) & 0xf;
2016 rd0
= (insn
>> 16) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2018 tmp
= tcg_temp_new_i32();
2019 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2020 tcg_temp_free_i32(tmp
);
2023 switch ((insn
>> 22) & 3) {
2025 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2028 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2031 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2034 tcg_temp_free_i32(tmp
);
2035 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2036 gen_op_iwmmxt_set_mup();
2037 gen_op_iwmmxt_set_cup();
2039 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2040 case 0x314: case 0x714: case 0xb14: case 0xf14:
2041 if (((insn
>> 22) & 3) == 0)
2043 wrd
= (insn
>> 12) & 0xf;
2044 rd0
= (insn
>> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2046 tmp
= tcg_temp_new_i32();
2047 switch ((insn
>> 22) & 3) {
2049 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2050 tcg_temp_free_i32(tmp
);
2053 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2056 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2057 tcg_temp_free_i32(tmp
);
2060 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2063 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2064 tcg_temp_free_i32(tmp
);
2067 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2070 tcg_temp_free_i32(tmp
);
2071 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2075 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2076 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2077 wrd
= (insn
>> 12) & 0xf;
2078 rd0
= (insn
>> 16) & 0xf;
2079 rd1
= (insn
>> 0) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2081 switch ((insn
>> 22) & 3) {
2083 if (insn
& (1 << 21))
2084 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2086 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2089 if (insn
& (1 << 21))
2090 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2092 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2095 if (insn
& (1 << 21))
2096 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2098 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2103 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2104 gen_op_iwmmxt_set_mup();
2106 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2107 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2108 wrd
= (insn
>> 12) & 0xf;
2109 rd0
= (insn
>> 16) & 0xf;
2110 rd1
= (insn
>> 0) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2112 switch ((insn
>> 22) & 3) {
2114 if (insn
& (1 << 21))
2115 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2117 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2120 if (insn
& (1 << 21))
2121 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2123 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2126 if (insn
& (1 << 21))
2127 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2129 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2134 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2135 gen_op_iwmmxt_set_mup();
2137 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2138 case 0x402: case 0x502: case 0x602: case 0x702:
2139 wrd
= (insn
>> 12) & 0xf;
2140 rd0
= (insn
>> 16) & 0xf;
2141 rd1
= (insn
>> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2143 tmp
= tcg_const_i32((insn
>> 20) & 3);
2144 iwmmxt_load_reg(cpu_V1
, rd1
);
2145 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2147 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2148 gen_op_iwmmxt_set_mup();
2150 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2151 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2152 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2153 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2154 wrd
= (insn
>> 12) & 0xf;
2155 rd0
= (insn
>> 16) & 0xf;
2156 rd1
= (insn
>> 0) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2158 switch ((insn
>> 20) & 0xf) {
2160 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2163 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2166 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2169 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2172 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2175 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2178 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2181 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2184 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2189 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2193 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2194 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2195 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2196 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2197 wrd
= (insn
>> 12) & 0xf;
2198 rd0
= (insn
>> 16) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2200 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2201 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2203 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2204 gen_op_iwmmxt_set_mup();
2205 gen_op_iwmmxt_set_cup();
2207 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2208 case 0x418: case 0x518: case 0x618: case 0x718:
2209 case 0x818: case 0x918: case 0xa18: case 0xb18:
2210 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2211 wrd
= (insn
>> 12) & 0xf;
2212 rd0
= (insn
>> 16) & 0xf;
2213 rd1
= (insn
>> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2215 switch ((insn
>> 20) & 0xf) {
2217 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2220 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2223 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2226 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2229 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2232 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2235 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2238 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2241 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2246 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2250 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2251 case 0x408: case 0x508: case 0x608: case 0x708:
2252 case 0x808: case 0x908: case 0xa08: case 0xb08:
2253 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2254 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2256 wrd
= (insn
>> 12) & 0xf;
2257 rd0
= (insn
>> 16) & 0xf;
2258 rd1
= (insn
>> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2260 switch ((insn
>> 22) & 3) {
2262 if (insn
& (1 << 21))
2263 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2265 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2268 if (insn
& (1 << 21))
2269 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2271 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2274 if (insn
& (1 << 21))
2275 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2277 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2280 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2284 case 0x201: case 0x203: case 0x205: case 0x207:
2285 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2286 case 0x211: case 0x213: case 0x215: case 0x217:
2287 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2288 wrd
= (insn
>> 5) & 0xf;
2289 rd0
= (insn
>> 12) & 0xf;
2290 rd1
= (insn
>> 0) & 0xf;
2291 if (rd0
== 0xf || rd1
== 0xf)
2293 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2294 tmp
= load_reg(s
, rd0
);
2295 tmp2
= load_reg(s
, rd1
);
2296 switch ((insn
>> 16) & 0xf) {
2297 case 0x0: /* TMIA */
2298 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2300 case 0x8: /* TMIAPH */
2301 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2303 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2304 if (insn
& (1 << 16))
2305 tcg_gen_shri_i32(tmp
, tmp
, 16);
2306 if (insn
& (1 << 17))
2307 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2308 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2311 tcg_temp_free_i32(tmp2
);
2312 tcg_temp_free_i32(tmp
);
2315 tcg_temp_free_i32(tmp2
);
2316 tcg_temp_free_i32(tmp
);
2317 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2318 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
2329 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2331 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2334 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2335 /* Multiply with Internal Accumulate Format */
2336 rd0
= (insn
>> 12) & 0xf;
2338 acc
= (insn
>> 5) & 7;
2343 tmp
= load_reg(s
, rd0
);
2344 tmp2
= load_reg(s
, rd1
);
2345 switch ((insn
>> 16) & 0xf) {
2347 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2349 case 0x8: /* MIAPH */
2350 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2352 case 0xc: /* MIABB */
2353 case 0xd: /* MIABT */
2354 case 0xe: /* MIATB */
2355 case 0xf: /* MIATT */
2356 if (insn
& (1 << 16))
2357 tcg_gen_shri_i32(tmp
, tmp
, 16);
2358 if (insn
& (1 << 17))
2359 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2360 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2365 tcg_temp_free_i32(tmp2
);
2366 tcg_temp_free_i32(tmp
);
2368 gen_op_iwmmxt_movq_wRn_M0(acc
);
2372 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2373 /* Internal Accumulator Access Format */
2374 rdhi
= (insn
>> 16) & 0xf;
2375 rdlo
= (insn
>> 12) & 0xf;
2381 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2382 iwmmxt_load_reg(cpu_V0
, acc
);
2383 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2384 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2385 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2386 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2388 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2389 iwmmxt_store_reg(cpu_V0
, acc
);
2397 /* Disassemble system coprocessor instruction. Return nonzero if
2398 instruction is not defined. */
2399 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2402 uint32_t rd
= (insn
>> 12) & 0xf;
2403 uint32_t cp
= (insn
>> 8) & 0xf;
2408 if (insn
& ARM_CP_RW_BIT
) {
2409 if (!env
->cp
[cp
].cp_read
)
2411 gen_set_pc_im(s
->pc
);
2412 tmp
= tcg_temp_new_i32();
2413 tmp2
= tcg_const_i32(insn
);
2414 gen_helper_get_cp(tmp
, cpu_env
, tmp2
);
2415 tcg_temp_free(tmp2
);
2416 store_reg(s
, rd
, tmp
);
2418 if (!env
->cp
[cp
].cp_write
)
2420 gen_set_pc_im(s
->pc
);
2421 tmp
= load_reg(s
, rd
);
2422 tmp2
= tcg_const_i32(insn
);
2423 gen_helper_set_cp(cpu_env
, tmp2
, tmp
);
2424 tcg_temp_free(tmp2
);
2425 tcg_temp_free_i32(tmp
);
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
    TCGv tmp;

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
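
/* Example: with the D/Vd encoding used by VFP_DREG_D, insn[15:12] supplies
 * the low four bits of the register number and insn[22] supplies bit 4, so
 * Vd = 0x3 with D = 1 selects d19 on a VFP3 core.  Pre-VFP3 cores only have
 * d0-d15, which is why the non-VFP3 branch rejects any encoding with the
 * extra bit set.
 */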
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
2675 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
2677 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
2683 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2686 if (!s
->vfp_enabled
) {
2687 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2688 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2690 rn
= (insn
>> 16) & 0xf;
2691 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2692 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2695 dp
= ((insn
& 0xf00) == 0xb00);
2696 switch ((insn
>> 24) & 0xf) {
2698 if (insn
& (1 << 4)) {
2699 /* single register transfer */
2700 rd
= (insn
>> 12) & 0xf;
2705 VFP_DREG_N(rn
, insn
);
2708 if (insn
& 0x00c00060
2709 && !arm_feature(env
, ARM_FEATURE_NEON
))
2712 pass
= (insn
>> 21) & 1;
2713 if (insn
& (1 << 22)) {
2715 offset
= ((insn
>> 5) & 3) * 8;
2716 } else if (insn
& (1 << 5)) {
2718 offset
= (insn
& (1 << 6)) ? 16 : 0;
2723 if (insn
& ARM_CP_RW_BIT
) {
2725 tmp
= neon_load_reg(rn
, pass
);
2729 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2730 if (insn
& (1 << 23))
2736 if (insn
& (1 << 23)) {
2738 tcg_gen_shri_i32(tmp
, tmp
, 16);
2744 tcg_gen_sari_i32(tmp
, tmp
, 16);
2753 store_reg(s
, rd
, tmp
);
2756 tmp
= load_reg(s
, rd
);
2757 if (insn
& (1 << 23)) {
2760 gen_neon_dup_u8(tmp
, 0);
2761 } else if (size
== 1) {
2762 gen_neon_dup_low16(tmp
);
2764 for (n
= 0; n
<= pass
* 2; n
++) {
2765 tmp2
= tcg_temp_new_i32();
2766 tcg_gen_mov_i32(tmp2
, tmp
);
2767 neon_store_reg(rn
, n
, tmp2
);
2769 neon_store_reg(rn
, n
, tmp
);
2774 tmp2
= neon_load_reg(rn
, pass
);
2775 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2776 tcg_temp_free_i32(tmp2
);
2779 tmp2
= neon_load_reg(rn
, pass
);
2780 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2781 tcg_temp_free_i32(tmp2
);
2786 neon_store_reg(rn
, pass
, tmp
);
2790 if ((insn
& 0x6f) != 0x00)
2792 rn
= VFP_SREG_N(insn
);
2793 if (insn
& ARM_CP_RW_BIT
) {
2795 if (insn
& (1 << 21)) {
2796 /* system register */
2801 /* VFP2 allows access to FSID from userspace.
2802 VFP3 restricts all id registers to privileged
2805 && arm_feature(env
, ARM_FEATURE_VFP3
))
2807 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2812 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2814 case ARM_VFP_FPINST
:
2815 case ARM_VFP_FPINST2
:
2816 /* Not present in VFP3. */
2818 || arm_feature(env
, ARM_FEATURE_VFP3
))
2820 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2824 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2825 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2827 tmp
= tcg_temp_new_i32();
2828 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2834 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2836 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2842 gen_mov_F0_vreg(0, rn
);
2843 tmp
= gen_vfp_mrs();
2846 /* Set the 4 flag bits in the CPSR. */
2848 tcg_temp_free_i32(tmp
);
2850 store_reg(s
, rd
, tmp
);
2854 tmp
= load_reg(s
, rd
);
2855 if (insn
& (1 << 21)) {
2857 /* system register */
2862 /* Writes are ignored. */
2865 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2866 tcg_temp_free_i32(tmp
);
2872 /* TODO: VFP subarchitecture support.
2873 * For now, keep the EN bit only */
2874 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2875 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2878 case ARM_VFP_FPINST
:
2879 case ARM_VFP_FPINST2
:
2880 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2887 gen_mov_vreg_F0(0, rn
);
2892 /* data processing */
2893 /* The opcode is in bits 23, 21, 20 and 6. */
2894 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2898 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2900 /* rn is register number */
2901 VFP_DREG_N(rn
, insn
);
2904 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2905 /* Integer or single precision destination. */
2906 rd
= VFP_SREG_D(insn
);
2908 VFP_DREG_D(rd
, insn
);
2911 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2912 /* VCVT from int is always from S reg regardless of dp bit.
2913 * VCVT with immediate frac_bits has same format as SREG_M
2915 rm
= VFP_SREG_M(insn
);
2917 VFP_DREG_M(rm
, insn
);
2920 rn
= VFP_SREG_N(insn
);
2921 if (op
== 15 && rn
== 15) {
2922 /* Double precision destination. */
2923 VFP_DREG_D(rd
, insn
);
2925 rd
= VFP_SREG_D(insn
);
2927 /* NB that we implicitly rely on the encoding for the frac_bits
2928 * in VCVT of fixed to float being the same as that of an SREG_M
2930 rm
= VFP_SREG_M(insn
);
2933 veclen
= s
->vec_len
;
2934 if (op
== 15 && rn
> 3)
2937 /* Shut up compiler warnings. */
2948 /* Figure out what type of vector operation this is. */
2949 if ((rd
& bank_mask
) == 0) {
2954 delta_d
= (s
->vec_stride
>> 1) + 1;
2956 delta_d
= s
->vec_stride
+ 1;
2958 if ((rm
& bank_mask
) == 0) {
2959 /* mixed scalar/vector */
2968 /* Load the initial operands. */
2973 /* Integer source */
2974 gen_mov_F0_vreg(0, rm
);
2979 gen_mov_F0_vreg(dp
, rd
);
2980 gen_mov_F1_vreg(dp
, rm
);
2984 /* Compare with zero */
2985 gen_mov_F0_vreg(dp
, rd
);
2996 /* Source and destination the same. */
2997 gen_mov_F0_vreg(dp
, rd
);
3000 /* One source operand. */
3001 gen_mov_F0_vreg(dp
, rm
);
3005 /* Two source operands. */
3006 gen_mov_F0_vreg(dp
, rn
);
3007 gen_mov_F1_vreg(dp
, rm
);
3011 /* Perform the calculation. */
3013 case 0: /* mac: fd + (fn * fm) */
3015 gen_mov_F1_vreg(dp
, rd
);
3018 case 1: /* nmac: fd - (fn * fm) */
3021 gen_mov_F1_vreg(dp
, rd
);
3024 case 2: /* msc: -fd + (fn * fm) */
3026 gen_mov_F1_vreg(dp
, rd
);
3029 case 3: /* nmsc: -fd - (fn * fm) */
3032 gen_mov_F1_vreg(dp
, rd
);
3035 case 4: /* mul: fn * fm */
3038 case 5: /* nmul: -(fn * fm) */
3042 case 6: /* add: fn + fm */
3045 case 7: /* sub: fn - fm */
3048 case 8: /* div: fn / fm */
3051 case 14: /* fconst */
3052 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3055 n
= (insn
<< 12) & 0x80000000;
3056 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3063 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3070 tcg_gen_movi_i32(cpu_F0s
, n
);
3073 case 15: /* extension space */
3087 case 4: /* vcvtb.f32.f16 */
3088 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3090 tmp
= gen_vfp_mrs();
3091 tcg_gen_ext16u_i32(tmp
, tmp
);
3092 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3093 tcg_temp_free_i32(tmp
);
3095 case 5: /* vcvtt.f32.f16 */
3096 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3098 tmp
= gen_vfp_mrs();
3099 tcg_gen_shri_i32(tmp
, tmp
, 16);
3100 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3101 tcg_temp_free_i32(tmp
);
3103 case 6: /* vcvtb.f16.f32 */
3104 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3106 tmp
= tcg_temp_new_i32();
3107 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3108 gen_mov_F0_vreg(0, rd
);
3109 tmp2
= gen_vfp_mrs();
3110 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3111 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3112 tcg_temp_free_i32(tmp2
);
3115 case 7: /* vcvtt.f16.f32 */
3116 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3118 tmp
= tcg_temp_new_i32();
3119 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3120 tcg_gen_shli_i32(tmp
, tmp
, 16);
3121 gen_mov_F0_vreg(0, rd
);
3122 tmp2
= gen_vfp_mrs();
3123 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3124 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3125 tcg_temp_free_i32(tmp2
);
3137 case 11: /* cmpez */
3141 case 15: /* single<->double conversion */
3143 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3145 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3147 case 16: /* fuito */
3150 case 17: /* fsito */
3153 case 20: /* fshto */
3154 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3156 gen_vfp_shto(dp
, 16 - rm
);
3158 case 21: /* fslto */
3159 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3161 gen_vfp_slto(dp
, 32 - rm
);
3163 case 22: /* fuhto */
3164 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3166 gen_vfp_uhto(dp
, 16 - rm
);
3168 case 23: /* fulto */
3169 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3171 gen_vfp_ulto(dp
, 32 - rm
);
3173 case 24: /* ftoui */
3176 case 25: /* ftouiz */
3179 case 26: /* ftosi */
3182 case 27: /* ftosiz */
3185 case 28: /* ftosh */
3186 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3188 gen_vfp_tosh(dp
, 16 - rm
);
3190 case 29: /* ftosl */
3191 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3193 gen_vfp_tosl(dp
, 32 - rm
);
3195 case 30: /* ftouh */
3196 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3198 gen_vfp_touh(dp
, 16 - rm
);
3200 case 31: /* ftoul */
3201 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3203 gen_vfp_toul(dp
, 32 - rm
);
3205 default: /* undefined */
3206 printf ("rn:%d\n", rn
);
3210 default: /* undefined */
3211 printf ("op:%d\n", op
);
3215 /* Write back the result. */
3216 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3217 ; /* Comparison, do nothing. */
3218 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3219 /* VCVT double to int: always integer result. */
3220 gen_mov_vreg_F0(0, rd
);
3221 else if (op
== 15 && rn
== 15)
3223 gen_mov_vreg_F0(!dp
, rd
);
3225 gen_mov_vreg_F0(dp
, rd
);
3227 /* break out of the loop if we have finished */
3231 if (op
== 15 && delta_m
== 0) {
3232 /* single source one-many */
3234 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3236 gen_mov_vreg_F0(dp
, rd
);
3240 /* Setup the next operands. */
3242 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3246 /* One source operand. */
3247 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3249 gen_mov_F0_vreg(dp
, rm
);
3251 /* Two source operands. */
3252 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3254 gen_mov_F0_vreg(dp
, rn
);
3256 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3258 gen_mov_F1_vreg(dp
, rm
);
3266 if ((insn
& 0x03e00000) == 0x00400000) {
3267 /* two-register transfer */
3268 rn
= (insn
>> 16) & 0xf;
3269 rd
= (insn
>> 12) & 0xf;
3271 VFP_DREG_M(rm
, insn
);
3273 rm
= VFP_SREG_M(insn
);
3276 if (insn
& ARM_CP_RW_BIT
) {
3279 gen_mov_F0_vreg(0, rm
* 2);
3280 tmp
= gen_vfp_mrs();
3281 store_reg(s
, rd
, tmp
);
3282 gen_mov_F0_vreg(0, rm
* 2 + 1);
3283 tmp
= gen_vfp_mrs();
3284 store_reg(s
, rn
, tmp
);
3286 gen_mov_F0_vreg(0, rm
);
3287 tmp
= gen_vfp_mrs();
3288 store_reg(s
, rd
, tmp
);
3289 gen_mov_F0_vreg(0, rm
+ 1);
3290 tmp
= gen_vfp_mrs();
3291 store_reg(s
, rn
, tmp
);
3296 tmp
= load_reg(s
, rd
);
3298 gen_mov_vreg_F0(0, rm
* 2);
3299 tmp
= load_reg(s
, rn
);
3301 gen_mov_vreg_F0(0, rm
* 2 + 1);
3303 tmp
= load_reg(s
, rd
);
3305 gen_mov_vreg_F0(0, rm
);
3306 tmp
= load_reg(s
, rn
);
3308 gen_mov_vreg_F0(0, rm
+ 1);
3313 rn
= (insn
>> 16) & 0xf;
3315 VFP_DREG_D(rd
, insn
);
3317 rd
= VFP_SREG_D(insn
);
3318 if (s
->thumb
&& rn
== 15) {
3319 addr
= tcg_temp_new_i32();
3320 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3322 addr
= load_reg(s
, rn
);
3324 if ((insn
& 0x01200000) == 0x01000000) {
3325 /* Single load/store */
3326 offset
= (insn
& 0xff) << 2;
3327 if ((insn
& (1 << 23)) == 0)
3329 tcg_gen_addi_i32(addr
, addr
, offset
);
3330 if (insn
& (1 << 20)) {
3331 gen_vfp_ld(s
, dp
, addr
);
3332 gen_mov_vreg_F0(dp
, rd
);
3334 gen_mov_F0_vreg(dp
, rd
);
3335 gen_vfp_st(s
, dp
, addr
);
3337 tcg_temp_free_i32(addr
);
3339 /* load/store multiple */
3341 n
= (insn
>> 1) & 0x7f;
3345 if (insn
& (1 << 24)) /* pre-decrement */
3346 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3352 for (i
= 0; i
< n
; i
++) {
3353 if (insn
& ARM_CP_RW_BIT
) {
3355 gen_vfp_ld(s
, dp
, addr
);
3356 gen_mov_vreg_F0(dp
, rd
+ i
);
3359 gen_mov_F0_vreg(dp
, rd
+ i
);
3360 gen_vfp_st(s
, dp
, addr
);
3362 tcg_gen_addi_i32(addr
, addr
, offset
);
3364 if (insn
& (1 << 21)) {
3366 if (insn
& (1 << 24))
3367 offset
= -offset
* n
;
3368 else if (dp
&& (insn
& 1))
3374 tcg_gen_addi_i32(addr
, addr
, offset
);
3375 store_reg(s
, rn
, addr
);
3377 tcg_temp_free_i32(addr
);
3383 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
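
/* Note: the four flags bits select successive bytes of the PSR, matching the
 * MSR field mask encoding (c = bits[7:0], x = bits[15:8], s = bits[23:16],
 * f = bits[31:24]); callers are expected to pass the instruction's field-mask
 * bits here.
 */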
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
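
/* Rationale: a pairwise max/min on 32-bit elements compares the single
 * element drawn from each input pair, which is exactly what the elementwise
 * helper already computes, so no separate pairwise helper is needed for
 * this element size.
 */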
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qunzip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_qunzip16(cpu_env, tmp, tmp2); break;
        case 2: gen_helper_neon_qunzip32(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_unzip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_unzip16(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qzip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_qzip16(cpu_env, tmp, tmp2); break;
        case 2: gen_helper_neon_qzip32(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_zip8(cpu_env, tmp, tmp2); break;
        case 1: gen_helper_neon_zip16(cpu_env, tmp, tmp2); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3772 } neon_ls_element_type
[11] = {
3786 /* Translate a NEON load/store element instruction. Return nonzero if the
3787 instruction is invalid. */
3788 static int disas_neon_ls_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
3807 if (!s
->vfp_enabled
)
3809 VFP_DREG_D(rd
, insn
);
3810 rn
= (insn
>> 16) & 0xf;
3812 load
= (insn
& (1 << 21)) != 0;
3813 if ((insn
& (1 << 23)) == 0) {
3814 /* Load store all elements. */
3815 op
= (insn
>> 8) & 0xf;
3816 size
= (insn
>> 6) & 3;
3819 nregs
= neon_ls_element_type
[op
].nregs
;
3820 interleave
= neon_ls_element_type
[op
].interleave
;
3821 spacing
= neon_ls_element_type
[op
].spacing
;
3822 if (size
== 3 && (interleave
| spacing
) != 1)
3824 addr
= tcg_temp_new_i32();
3825 load_reg_var(s
, addr
, rn
);
3826 stride
= (1 << size
) * interleave
;
3827 for (reg
= 0; reg
< nregs
; reg
++) {
3828 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3829 load_reg_var(s
, addr
, rn
);
3830 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3831 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3832 load_reg_var(s
, addr
, rn
);
3833 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3837 tmp64
= gen_ld64(addr
, IS_USER(s
));
3838 neon_store_reg64(tmp64
, rd
);
3839 tcg_temp_free_i64(tmp64
);
3841 tmp64
= tcg_temp_new_i64();
3842 neon_load_reg64(tmp64
, rd
);
3843 gen_st64(tmp64
, addr
, IS_USER(s
));
3845 tcg_gen_addi_i32(addr
, addr
, stride
);
3847 for (pass
= 0; pass
< 2; pass
++) {
3850 tmp
= gen_ld32(addr
, IS_USER(s
));
3851 neon_store_reg(rd
, pass
, tmp
);
3853 tmp
= neon_load_reg(rd
, pass
);
3854 gen_st32(tmp
, addr
, IS_USER(s
));
3856 tcg_gen_addi_i32(addr
, addr
, stride
);
3857 } else if (size
== 1) {
3859 tmp
= gen_ld16u(addr
, IS_USER(s
));
3860 tcg_gen_addi_i32(addr
, addr
, stride
);
3861 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3862 tcg_gen_addi_i32(addr
, addr
, stride
);
3863 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3864 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3865 tcg_temp_free_i32(tmp2
);
3866 neon_store_reg(rd
, pass
, tmp
);
3868 tmp
= neon_load_reg(rd
, pass
);
3869 tmp2
= tcg_temp_new_i32();
3870 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3871 gen_st16(tmp
, addr
, IS_USER(s
));
3872 tcg_gen_addi_i32(addr
, addr
, stride
);
3873 gen_st16(tmp2
, addr
, IS_USER(s
));
3874 tcg_gen_addi_i32(addr
, addr
, stride
);
3876 } else /* size == 0 */ {
3879 for (n
= 0; n
< 4; n
++) {
3880 tmp
= gen_ld8u(addr
, IS_USER(s
));
3881 tcg_gen_addi_i32(addr
, addr
, stride
);
3885 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3886 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3887 tcg_temp_free_i32(tmp
);
3890 neon_store_reg(rd
, pass
, tmp2
);
3892 tmp2
= neon_load_reg(rd
, pass
);
3893 for (n
= 0; n
< 4; n
++) {
3894 tmp
= tcg_temp_new_i32();
3896 tcg_gen_mov_i32(tmp
, tmp2
);
3898 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3900 gen_st8(tmp
, addr
, IS_USER(s
));
3901 tcg_gen_addi_i32(addr
, addr
, stride
);
3903 tcg_temp_free_i32(tmp2
);
3910 tcg_temp_free_i32(addr
);
3913 size
= (insn
>> 10) & 3;
3915 /* Load single element to all lanes. */
3916 int a
= (insn
>> 4) & 1;
3920 size
= (insn
>> 6) & 3;
3921 nregs
= ((insn
>> 8) & 3) + 1;
3924 if (nregs
!= 4 || a
== 0) {
3927 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3930 if (nregs
== 1 && a
== 1 && size
== 0) {
3933 if (nregs
== 3 && a
== 1) {
3936 addr
= tcg_temp_new_i32();
3937 load_reg_var(s
, addr
, rn
);
3939 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3940 tmp
= gen_load_and_replicate(s
, addr
, size
);
3941 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3942 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3943 if (insn
& (1 << 5)) {
3944 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
3945 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
3947 tcg_temp_free_i32(tmp
);
3949 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3950 stride
= (insn
& (1 << 5)) ? 2 : 1;
3951 for (reg
= 0; reg
< nregs
; reg
++) {
3952 tmp
= gen_load_and_replicate(s
, addr
, size
);
3953 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3954 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3955 tcg_temp_free_i32(tmp
);
3956 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3960 tcg_temp_free_i32(addr
);
3961 stride
= (1 << size
) * nregs
;
3963 /* Single element. */
3964 pass
= (insn
>> 7) & 1;
3967 shift
= ((insn
>> 5) & 3) * 8;
3971 shift
= ((insn
>> 6) & 1) * 16;
3972 stride
= (insn
& (1 << 5)) ? 2 : 1;
3976 stride
= (insn
& (1 << 6)) ? 2 : 1;
3981 nregs
= ((insn
>> 8) & 3) + 1;
3982 addr
= tcg_temp_new_i32();
3983 load_reg_var(s
, addr
, rn
);
3984 for (reg
= 0; reg
< nregs
; reg
++) {
3988 tmp
= gen_ld8u(addr
, IS_USER(s
));
3991 tmp
= gen_ld16u(addr
, IS_USER(s
));
3994 tmp
= gen_ld32(addr
, IS_USER(s
));
3996 default: /* Avoid compiler warnings. */
4000 tmp2
= neon_load_reg(rd
, pass
);
4001 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
4002 tcg_temp_free_i32(tmp2
);
4004 neon_store_reg(rd
, pass
, tmp
);
4005 } else { /* Store */
4006 tmp
= neon_load_reg(rd
, pass
);
4008 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4011 gen_st8(tmp
, addr
, IS_USER(s
));
4014 gen_st16(tmp
, addr
, IS_USER(s
));
4017 gen_st32(tmp
, addr
, IS_USER(s
));
4022 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4024 tcg_temp_free_i32(addr
);
4025 stride
= nregs
* (1 << size
);
4031 base
= load_reg(s
, rn
);
4033 tcg_gen_addi_i32(base
, base
, stride
);
4036 index
= load_reg(s
, rm
);
4037 tcg_gen_add_i32(base
, base
, index
);
4038 tcg_temp_free_i32(index
);
4040 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
4233 /* Translate a NEON data processing instruction. Return nonzero if the
4234 instruction is invalid.
4235 We process data in a mixture of 32-bit and 64-bit chunks.
4236 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4238 static int disas_neon_data_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
4251 TCGv tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
4254 if (!s
->vfp_enabled
)
4256 q
= (insn
& (1 << 6)) != 0;
4257 u
= (insn
>> 24) & 1;
4258 VFP_DREG_D(rd
, insn
);
4259 VFP_DREG_N(rn
, insn
);
4260 VFP_DREG_M(rm
, insn
);
4261 size
= (insn
>> 20) & 3;
4262 if ((insn
& (1 << 23)) == 0) {
4263 /* Three register same length. */
4264 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4265 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4266 || op
== 10 || op
== 11 || op
== 16)) {
4267 /* 64-bit element instructions. */
4268 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4269 neon_load_reg64(cpu_V0
, rn
+ pass
);
4270 neon_load_reg64(cpu_V1
, rm
+ pass
);
4274 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4277 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4283 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4286 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4292 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4294 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4299 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4302 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4306 case 10: /* VRSHL */
4308 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4310 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4313 case 11: /* VQRSHL */
4315 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4318 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4324 tcg_gen_sub_i64(CPU_V001
);
4326 tcg_gen_add_i64(CPU_V001
);
4332 neon_store_reg64(cpu_V0
, rd
+ pass
);
4339 case 10: /* VRSHL */
4340 case 11: /* VQRSHL */
4343 /* Shift instruction operands are reversed. */
4350 case 20: /* VPMAX */
4351 case 21: /* VPMIN */
4352 case 23: /* VPADD */
4355 case 26: /* VPADD (float) */
4356 pairwise
= (u
&& size
< 2);
4358 case 30: /* VPMIN/VPMAX (float) */
4366 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4375 tmp
= neon_load_reg(rn
, n
);
4376 tmp2
= neon_load_reg(rn
, n
+ 1);
4378 tmp
= neon_load_reg(rm
, n
);
4379 tmp2
= neon_load_reg(rm
, n
+ 1);
4383 tmp
= neon_load_reg(rn
, pass
);
4384 tmp2
= neon_load_reg(rm
, pass
);
4388 GEN_NEON_INTEGER_OP(hadd
);
4391 GEN_NEON_INTEGER_OP_ENV(qadd
);
4393 case 2: /* VRHADD */
4394 GEN_NEON_INTEGER_OP(rhadd
);
4396 case 3: /* Logic ops. */
4397 switch ((u
<< 2) | size
) {
4399 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4402 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4405 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4408 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4411 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4414 tmp3
= neon_load_reg(rd
, pass
);
4415 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4416 tcg_temp_free_i32(tmp3
);
4419 tmp3
= neon_load_reg(rd
, pass
);
4420 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4421 tcg_temp_free_i32(tmp3
);
4424 tmp3
= neon_load_reg(rd
, pass
);
4425 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4426 tcg_temp_free_i32(tmp3
);
4431 GEN_NEON_INTEGER_OP(hsub
);
4434 GEN_NEON_INTEGER_OP_ENV(qsub
);
4437 GEN_NEON_INTEGER_OP(cgt
);
4440 GEN_NEON_INTEGER_OP(cge
);
4443 GEN_NEON_INTEGER_OP(shl
);
4446 GEN_NEON_INTEGER_OP_ENV(qshl
);
4448 case 10: /* VRSHL */
4449 GEN_NEON_INTEGER_OP(rshl
);
4451 case 11: /* VQRSHL */
4452 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4455 GEN_NEON_INTEGER_OP(max
);
4458 GEN_NEON_INTEGER_OP(min
);
4461 GEN_NEON_INTEGER_OP(abd
);
4464 GEN_NEON_INTEGER_OP(abd
);
4465 tcg_temp_free_i32(tmp2
);
4466 tmp2
= neon_load_reg(rd
, pass
);
4467 gen_neon_add(size
, tmp
, tmp2
);
4470 if (!u
) { /* VADD */
4471 if (gen_neon_add(size
, tmp
, tmp2
))
4475 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4476 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4477 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4483 if (!u
) { /* VTST */
4485 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4486 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4487 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4492 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4493 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4494 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4499 case 18: /* Multiply. */
4501 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4502 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4503 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4506 tcg_temp_free_i32(tmp2
);
4507 tmp2
= neon_load_reg(rd
, pass
);
4509 gen_neon_rsb(size
, tmp
, tmp2
);
4511 gen_neon_add(size
, tmp
, tmp2
);
4515 if (u
) { /* polynomial */
4516 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4517 } else { /* Integer */
4519 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4520 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4521 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4526 case 20: /* VPMAX */
4527 GEN_NEON_INTEGER_OP(pmax
);
4529 case 21: /* VPMIN */
4530 GEN_NEON_INTEGER_OP(pmin
);
case 22: /* Multiply high.  */
4533 if (!u
) { /* VQDMULH */
4535 case 1: gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4536 case 2: gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
4539 } else { /* VQRDHMUL */
4541 case 1: gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4542 case 2: gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
4547 case 23: /* VPADD */
4551 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4552 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4553 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
case 26: /* Floating point arithmetic.  */
4558 switch ((u
<< 2) | size
) {
4560 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4563 gen_helper_neon_sub_f32(tmp
, tmp
, tmp2
);
4566 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4569 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
);
4575 case 27: /* Float multiply. */
4576 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
4578 tcg_temp_free_i32(tmp2
);
4579 tmp2
= neon_load_reg(rd
, pass
);
4581 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4583 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
4587 case 28: /* Float compare. */
4589 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
4592 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
4594 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
4597 case 29: /* Float compare absolute. */
4601 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
);
4603 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
);
4605 case 30: /* Float min/max. */
4607 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
);
4609 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
);
4613 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4615 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4620 tcg_temp_free_i32(tmp2
);
4622 /* Save the result. For elementwise operations we can put it
4623 straight into the destination register. For pairwise operations
4624 we have to be careful to avoid clobbering the source operands. */
4625 if (pairwise
&& rd
== rm
) {
4626 neon_store_scratch(pass
, tmp
);
4628 neon_store_reg(rd
, pass
, tmp
);
4632 if (pairwise
&& rd
== rm
) {
4633 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4634 tmp
= neon_load_scratch(pass
);
4635 neon_store_reg(rd
, pass
, tmp
);
4638 /* End of 3 register same size operations. */
4639 } else if (insn
& (1 << 4)) {
4640 if ((insn
& 0x00380080) != 0) {
4641 /* Two registers and shift. */
4642 op
= (insn
>> 8) & 0xf;
4643 if (insn
& (1 << 7)) {
4648 while ((insn
& (1 << (size
+ 19))) == 0)
4651 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
/* To avoid excessive duplication of ops we implement shift
   by immediate using the variable shift operations.  */
4655 /* Shift by immediate:
4656 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4657 /* Right shifts are encoded as N - shift, where N is the
4658 element size in bits. */
4660 shift
= shift
- (1 << (size
+ 3));
4668 imm
= (uint8_t) shift
;
4673 imm
= (uint16_t) shift
;
4684 for (pass
= 0; pass
< count
; pass
++) {
4686 neon_load_reg64(cpu_V0
, rm
+ pass
);
4687 tcg_gen_movi_i64(cpu_V1
, imm
);
4692 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4694 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4699 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4701 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4706 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4708 case 5: /* VSHL, VSLI */
4709 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4711 case 6: /* VQSHLU */
4713 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
4721 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4724 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4729 if (op
== 1 || op
== 3) {
4731 neon_load_reg64(cpu_V1
, rd
+ pass
);
4732 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4733 } else if (op
== 4 || (op
== 5 && u
)) {
4735 neon_load_reg64(cpu_V1
, rd
+ pass
);
4737 if (shift
< -63 || shift
> 63) {
4741 mask
= 0xffffffffffffffffull
>> -shift
;
4743 mask
= 0xffffffffffffffffull
<< shift
;
4746 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
4747 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4749 neon_store_reg64(cpu_V0
, rd
+ pass
);
4750 } else { /* size < 3 */
4751 /* Operands in T0 and T1. */
4752 tmp
= neon_load_reg(rm
, pass
);
4753 tmp2
= tcg_temp_new_i32();
4754 tcg_gen_movi_i32(tmp2
, imm
);
4758 GEN_NEON_INTEGER_OP(shl
);
4762 GEN_NEON_INTEGER_OP(rshl
);
4767 GEN_NEON_INTEGER_OP(shl
);
4769 case 5: /* VSHL, VSLI */
4771 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
4772 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
4773 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
4777 case 6: /* VQSHLU */
4783 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
4787 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
4791 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
4799 GEN_NEON_INTEGER_OP_ENV(qshl
);
4802 tcg_temp_free_i32(tmp2
);
4804 if (op
== 1 || op
== 3) {
4806 tmp2
= neon_load_reg(rd
, pass
);
4807 gen_neon_add(size
, tmp
, tmp2
);
4808 tcg_temp_free_i32(tmp2
);
4809 } else if (op
== 4 || (op
== 5 && u
)) {
4814 mask
= 0xff >> -shift
;
4816 mask
= (uint8_t)(0xff << shift
);
4822 mask
= 0xffff >> -shift
;
4824 mask
= (uint16_t)(0xffff << shift
);
4828 if (shift
< -31 || shift
> 31) {
4832 mask
= 0xffffffffu
>> -shift
;
4834 mask
= 0xffffffffu
<< shift
;
4840 tmp2
= neon_load_reg(rd
, pass
);
4841 tcg_gen_andi_i32(tmp
, tmp
, mask
);
4842 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
4843 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4844 tcg_temp_free_i32(tmp2
);
4846 neon_store_reg(rd
, pass
, tmp
);
4849 } else if (op
< 10) {
4850 /* Shift by immediate and narrow:
4851 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4852 int input_unsigned
= (op
== 8) ? !u
: u
;
4854 shift
= shift
- (1 << (size
+ 3));
4857 tmp64
= tcg_const_i64(shift
);
4858 neon_load_reg64(cpu_V0
, rm
);
4859 neon_load_reg64(cpu_V1
, rm
+ 1);
4860 for (pass
= 0; pass
< 2; pass
++) {
4868 if (input_unsigned
) {
4869 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
4871 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
4874 if (input_unsigned
) {
4875 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
4877 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
4880 tmp
= tcg_temp_new_i32();
4881 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
4882 neon_store_reg(rd
, pass
, tmp
);
4884 tcg_temp_free_i64(tmp64
);
4887 imm
= (uint16_t)shift
;
4891 imm
= (uint32_t)shift
;
4893 tmp2
= tcg_const_i32(imm
);
4894 tmp4
= neon_load_reg(rm
+ 1, 0);
4895 tmp5
= neon_load_reg(rm
+ 1, 1);
4896 for (pass
= 0; pass
< 2; pass
++) {
4898 tmp
= neon_load_reg(rm
, 0);
4902 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
4905 tmp3
= neon_load_reg(rm
, 1);
4909 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
4911 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4912 tcg_temp_free_i32(tmp
);
4913 tcg_temp_free_i32(tmp3
);
4914 tmp
= tcg_temp_new_i32();
4915 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
4916 neon_store_reg(rd
, pass
, tmp
);
4918 tcg_temp_free_i32(tmp2
);
4920 } else if (op
== 10) {
4924 tmp
= neon_load_reg(rm
, 0);
4925 tmp2
= neon_load_reg(rm
, 1);
4926 for (pass
= 0; pass
< 2; pass
++) {
4930 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4933 /* The shift is less than the width of the source
4934 type, so we can just shift the whole register. */
4935 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
4936 /* Widen the result of shift: we need to clear
4937 * the potential overflow bits resulting from
4938 * left bits of the narrow input appearing as
4939 * right bits of left the neighbour narrow
4941 if (size
< 2 || !u
) {
4944 imm
= (0xffu
>> (8 - shift
));
4946 } else if (size
== 1) {
4947 imm
= 0xffff >> (16 - shift
);
4950 imm
= 0xffffffff >> (32 - shift
);
4953 imm64
= imm
| (((uint64_t)imm
) << 32);
4957 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
4960 neon_store_reg64(cpu_V0
, rd
+ pass
);
4962 } else if (op
>= 14) {
4963 /* VCVT fixed-point. */
4964 /* We have already masked out the must-be-1 top bit of imm6,
4965 * hence this 32-shift where the ARM ARM has 64-imm6.
4968 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4969 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4972 gen_vfp_ulto(0, shift
);
4974 gen_vfp_slto(0, shift
);
4977 gen_vfp_toul(0, shift
);
4979 gen_vfp_tosl(0, shift
);
4981 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4986 } else { /* (insn & 0x00380080) == 0 */
4989 op
= (insn
>> 8) & 0xf;
4990 /* One register and immediate. */
4991 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4992 invert
= (insn
& (1 << 5)) != 0;
5010 imm
= (imm
<< 8) | (imm
<< 24);
5013 imm
= (imm
<< 8) | 0xff;
5016 imm
= (imm
<< 16) | 0xffff;
5019 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5024 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5025 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5031 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5032 if (op
& 1 && op
< 12) {
5033 tmp
= neon_load_reg(rd
, pass
);
5035 /* The immediate value has already been inverted, so
5037 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5039 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5043 tmp
= tcg_temp_new_i32();
5044 if (op
== 14 && invert
) {
5047 for (n
= 0; n
< 4; n
++) {
5048 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5049 val
|= 0xff << (n
* 8);
5051 tcg_gen_movi_i32(tmp
, val
);
5053 tcg_gen_movi_i32(tmp
, imm
);
5056 neon_store_reg(rd
, pass
, tmp
);
5059 } else { /* (insn & 0x00800010 == 0x00800000) */
5061 op
= (insn
>> 8) & 0xf;
5062 if ((insn
& (1 << 6)) == 0) {
5063 /* Three registers of different lengths. */
5067 /* prewiden, src1_wide, src2_wide */
5068 static const int neon_3reg_wide
[16][3] = {
5069 {1, 0, 0}, /* VADDL */
5070 {1, 1, 0}, /* VADDW */
5071 {1, 0, 0}, /* VSUBL */
5072 {1, 1, 0}, /* VSUBW */
5073 {0, 1, 1}, /* VADDHN */
5074 {0, 0, 0}, /* VABAL */
5075 {0, 1, 1}, /* VSUBHN */
5076 {0, 0, 0}, /* VABDL */
5077 {0, 0, 0}, /* VMLAL */
5078 {0, 0, 0}, /* VQDMLAL */
5079 {0, 0, 0}, /* VMLSL */
5080 {0, 0, 0}, /* VQDMLSL */
5081 {0, 0, 0}, /* Integer VMULL */
5082 {0, 0, 0}, /* VQDMULL */
5083 {0, 0, 0} /* Polynomial VMULL */
5086 prewiden
= neon_3reg_wide
[op
][0];
5087 src1_wide
= neon_3reg_wide
[op
][1];
5088 src2_wide
= neon_3reg_wide
[op
][2];
5090 if (size
== 0 && (op
== 9 || op
== 11 || op
== 13))
5093 /* Avoid overlapping operands. Wide source operands are
5094 always aligned so will never overlap with wide
5095 destinations in problematic ways. */
5096 if (rd
== rm
&& !src2_wide
) {
5097 tmp
= neon_load_reg(rm
, 1);
5098 neon_store_scratch(2, tmp
);
5099 } else if (rd
== rn
&& !src1_wide
) {
5100 tmp
= neon_load_reg(rn
, 1);
5101 neon_store_scratch(2, tmp
);
5104 for (pass
= 0; pass
< 2; pass
++) {
5106 neon_load_reg64(cpu_V0
, rn
+ pass
);
5109 if (pass
== 1 && rd
== rn
) {
5110 tmp
= neon_load_scratch(2);
5112 tmp
= neon_load_reg(rn
, pass
);
5115 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5119 neon_load_reg64(cpu_V1
, rm
+ pass
);
5122 if (pass
== 1 && rd
== rm
) {
5123 tmp2
= neon_load_scratch(2);
5125 tmp2
= neon_load_reg(rm
, pass
);
5128 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5132 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5133 gen_neon_addl(size
);
5135 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5136 gen_neon_subl(size
);
5138 case 5: case 7: /* VABAL, VABDL */
5139 switch ((size
<< 1) | u
) {
5141 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5144 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5147 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5150 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5153 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5156 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5160 tcg_temp_free_i32(tmp2
);
5161 tcg_temp_free_i32(tmp
);
5163 case 8: case 9: case 10: case 11: case 12: case 13:
5164 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5165 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5167 case 14: /* Polynomial VMULL */
5168 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5169 tcg_temp_free_i32(tmp2
);
5170 tcg_temp_free_i32(tmp
);
5172 default: /* 15 is RESERVED. */
5177 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5178 neon_store_reg64(cpu_V0
, rd
+ pass
);
5179 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5181 neon_load_reg64(cpu_V1
, rd
+ pass
);
5183 case 10: /* VMLSL */
5184 gen_neon_negl(cpu_V0
, size
);
5186 case 5: case 8: /* VABAL, VMLAL */
5187 gen_neon_addl(size
);
5189 case 9: case 11: /* VQDMLAL, VQDMLSL */
5190 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5192 gen_neon_negl(cpu_V0
, size
);
5194 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5199 neon_store_reg64(cpu_V0
, rd
+ pass
);
5200 } else if (op
== 4 || op
== 6) {
5201 /* Narrowing operation. */
5202 tmp
= tcg_temp_new_i32();
5206 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5209 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5212 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5213 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5220 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5223 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5226 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5227 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5228 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5236 neon_store_reg(rd
, 0, tmp3
);
5237 neon_store_reg(rd
, 1, tmp
);
5240 /* Write back the result. */
5241 neon_store_reg64(cpu_V0
, rd
+ pass
);
5245 /* Two registers and a scalar. */
5247 case 0: /* Integer VMLA scalar */
5248 case 1: /* Float VMLA scalar */
5249 case 4: /* Integer VMLS scalar */
5250 case 5: /* Floating point VMLS scalar */
5251 case 8: /* Integer VMUL scalar */
5252 case 9: /* Floating point VMUL scalar */
5253 case 12: /* VQDMULH scalar */
5254 case 13: /* VQRDMULH scalar */
5255 tmp
= neon_get_scalar(size
, rm
);
5256 neon_store_scratch(0, tmp
);
5257 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5258 tmp
= neon_load_scratch(0);
5259 tmp2
= neon_load_reg(rn
, pass
);
5262 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5264 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5266 } else if (op
== 13) {
5268 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5270 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5272 } else if (op
& 1) {
5273 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
5276 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5277 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5278 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5282 tcg_temp_free_i32(tmp2
);
5285 tmp2
= neon_load_reg(rd
, pass
);
5288 gen_neon_add(size
, tmp
, tmp2
);
5291 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
5294 gen_neon_rsb(size
, tmp
, tmp2
);
5297 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
5302 tcg_temp_free_i32(tmp2
);
5304 neon_store_reg(rd
, pass
, tmp
);
case 2: /* VMLAL scalar */
5308 case 3: /* VQDMLAL scalar */
5309 case 6: /* VMLSL scalar */
5310 case 7: /* VQDMLSL scalar */
5311 case 10: /* VMULL scalar */
5312 case 11: /* VQDMULL scalar */
5313 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5316 tmp2
= neon_get_scalar(size
, rm
);
5317 /* We need a copy of tmp2 because gen_neon_mull
5318 * deletes it during pass 0. */
5319 tmp4
= tcg_temp_new_i32();
5320 tcg_gen_mov_i32(tmp4
, tmp2
);
5321 tmp3
= neon_load_reg(rn
, 1);
5323 for (pass
= 0; pass
< 2; pass
++) {
5325 tmp
= neon_load_reg(rn
, 0);
5330 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5332 neon_load_reg64(cpu_V1
, rd
+ pass
);
5336 gen_neon_negl(cpu_V0
, size
);
5339 gen_neon_addl(size
);
5342 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5344 gen_neon_negl(cpu_V0
, size
);
5346 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5352 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5357 neon_store_reg64(cpu_V0
, rd
+ pass
);
5362 default: /* 14 and 15 are RESERVED */
5366 } else { /* size == 3 */
5369 imm
= (insn
>> 8) & 0xf;
5375 neon_load_reg64(cpu_V0
, rn
);
5377 neon_load_reg64(cpu_V1
, rn
+ 1);
5379 } else if (imm
== 8) {
5380 neon_load_reg64(cpu_V0
, rn
+ 1);
5382 neon_load_reg64(cpu_V1
, rm
);
5385 tmp64
= tcg_temp_new_i64();
5387 neon_load_reg64(cpu_V0
, rn
);
5388 neon_load_reg64(tmp64
, rn
+ 1);
5390 neon_load_reg64(cpu_V0
, rn
+ 1);
5391 neon_load_reg64(tmp64
, rm
);
5393 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5394 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5395 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5397 neon_load_reg64(cpu_V1
, rm
);
5399 neon_load_reg64(cpu_V1
, rm
+ 1);
5402 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5403 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5404 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5405 tcg_temp_free_i64(tmp64
);
5408 neon_load_reg64(cpu_V0
, rn
);
5409 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5410 neon_load_reg64(cpu_V1
, rm
);
5411 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5412 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5414 neon_store_reg64(cpu_V0
, rd
);
5416 neon_store_reg64(cpu_V1
, rd
+ 1);
5418 } else if ((insn
& (1 << 11)) == 0) {
5419 /* Two register misc. */
5420 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5421 size
= (insn
>> 18) & 3;
5423 case 0: /* VREV64 */
5426 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5427 tmp
= neon_load_reg(rm
, pass
* 2);
5428 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5430 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5431 case 1: gen_swap_half(tmp
); break;
5432 case 2: /* no-op */ break;
5435 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5437 neon_store_reg(rd
, pass
* 2, tmp2
);
5440 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5441 case 1: gen_swap_half(tmp2
); break;
5444 neon_store_reg(rd
, pass
* 2, tmp2
);
5448 case 4: case 5: /* VPADDL */
5449 case 12: case 13: /* VPADAL */
5452 for (pass
= 0; pass
< q
+ 1; pass
++) {
5453 tmp
= neon_load_reg(rm
, pass
* 2);
5454 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5455 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5456 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5458 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5459 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5460 case 2: tcg_gen_add_i64(CPU_V001
); break;
5465 neon_load_reg64(cpu_V1
, rd
+ pass
);
5466 gen_neon_addl(size
);
5468 neon_store_reg64(cpu_V0
, rd
+ pass
);
5473 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5474 tmp
= neon_load_reg(rm
, n
);
5475 tmp2
= neon_load_reg(rd
, n
+ 1);
5476 neon_store_reg(rm
, n
, tmp2
);
5477 neon_store_reg(rd
, n
+ 1, tmp
);
5484 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5489 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5493 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5497 for (pass
= 0; pass
< 2; pass
++) {
5498 neon_load_reg64(cpu_V0
, rm
+ pass
);
5499 tmp
= tcg_temp_new_i32();
5500 gen_neon_narrow_op(op
== 36, q
, size
, tmp
, cpu_V0
);
5504 neon_store_reg(rd
, 0, tmp2
);
5505 neon_store_reg(rd
, 1, tmp
);
5509 case 38: /* VSHLL */
5512 tmp
= neon_load_reg(rm
, 0);
5513 tmp2
= neon_load_reg(rm
, 1);
5514 for (pass
= 0; pass
< 2; pass
++) {
5517 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5518 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5519 neon_store_reg64(cpu_V0
, rd
+ pass
);
5522 case 44: /* VCVT.F16.F32 */
5523 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5525 tmp
= tcg_temp_new_i32();
5526 tmp2
= tcg_temp_new_i32();
5527 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5528 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5529 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5530 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5531 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5532 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5533 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5534 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5535 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5536 neon_store_reg(rd
, 0, tmp2
);
5537 tmp2
= tcg_temp_new_i32();
5538 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5539 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5540 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5541 neon_store_reg(rd
, 1, tmp2
);
5542 tcg_temp_free_i32(tmp
);
5544 case 46: /* VCVT.F32.F16 */
5545 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5547 tmp3
= tcg_temp_new_i32();
5548 tmp
= neon_load_reg(rm
, 0);
5549 tmp2
= neon_load_reg(rm
, 1);
5550 tcg_gen_ext16u_i32(tmp3
, tmp
);
5551 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5552 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5553 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5554 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5555 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5556 tcg_temp_free_i32(tmp
);
5557 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5558 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5559 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5560 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5561 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5562 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5563 tcg_temp_free_i32(tmp2
);
5564 tcg_temp_free_i32(tmp3
);
5568 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5569 if (op
== 30 || op
== 31 || op
>= 58) {
5570 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5571 neon_reg_offset(rm
, pass
));
5574 tmp
= neon_load_reg(rm
, pass
);
5577 case 1: /* VREV32 */
5579 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5580 case 1: gen_swap_half(tmp
); break;
5584 case 2: /* VREV16 */
5591 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5592 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5593 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5599 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5600 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5601 case 2: gen_helper_clz(tmp
, tmp
); break;
5608 gen_helper_neon_cnt_u8(tmp
, tmp
);
5613 tcg_gen_not_i32(tmp
, tmp
);
5615 case 14: /* VQABS */
5617 case 0: gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
); break;
5618 case 1: gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
); break;
5619 case 2: gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
); break;
5623 case 15: /* VQNEG */
5625 case 0: gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
); break;
5626 case 1: gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
); break;
5627 case 2: gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
); break;
5631 case 16: case 19: /* VCGT #0, VCLE #0 */
5632 tmp2
= tcg_const_i32(0);
5634 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5635 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5636 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5639 tcg_temp_free(tmp2
);
5641 tcg_gen_not_i32(tmp
, tmp
);
5643 case 17: case 20: /* VCGE #0, VCLT #0 */
5644 tmp2
= tcg_const_i32(0);
5646 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5647 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5648 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5651 tcg_temp_free(tmp2
);
5653 tcg_gen_not_i32(tmp
, tmp
);
5655 case 18: /* VCEQ #0 */
5656 tmp2
= tcg_const_i32(0);
5658 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5659 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5660 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5663 tcg_temp_free(tmp2
);
5667 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5668 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5669 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5676 tmp2
= tcg_const_i32(0);
5677 gen_neon_rsb(size
, tmp
, tmp2
);
5678 tcg_temp_free(tmp2
);
5680 case 24: /* Float VCGT #0 */
5681 tmp2
= tcg_const_i32(0);
5682 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
5683 tcg_temp_free(tmp2
);
5685 case 25: /* Float VCGE #0 */
5686 tmp2
= tcg_const_i32(0);
5687 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
5688 tcg_temp_free(tmp2
);
5690 case 26: /* Float VCEQ #0 */
5691 tmp2
= tcg_const_i32(0);
5692 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
5693 tcg_temp_free(tmp2
);
5695 case 27: /* Float VCLE #0 */
5696 tmp2
= tcg_const_i32(0);
5697 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
);
5698 tcg_temp_free(tmp2
);
5700 case 28: /* Float VCLT #0 */
5701 tmp2
= tcg_const_i32(0);
5702 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
);
5703 tcg_temp_free(tmp2
);
5705 case 30: /* Float VABS */
5708 case 31: /* Float VNEG */
5712 tmp2
= neon_load_reg(rd
, pass
);
5713 neon_store_reg(rm
, pass
, tmp2
);
5716 tmp2
= neon_load_reg(rd
, pass
);
5718 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
5719 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
5723 neon_store_reg(rm
, pass
, tmp2
);
5725 case 56: /* Integer VRECPE */
5726 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
5728 case 57: /* Integer VRSQRTE */
5729 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
5731 case 58: /* Float VRECPE */
5732 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5734 case 59: /* Float VRSQRTE */
5735 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5737 case 60: /* VCVT.F32.S32 */
5740 case 61: /* VCVT.F32.U32 */
5743 case 62: /* VCVT.S32.F32 */
5746 case 63: /* VCVT.U32.F32 */
5750 /* Reserved: 21, 29, 39-56 */
5753 if (op
== 30 || op
== 31 || op
>= 58) {
5754 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
5755 neon_reg_offset(rd
, pass
));
5757 neon_store_reg(rd
, pass
, tmp
);
5762 } else if ((insn
& (1 << 10)) == 0) {
5764 n
= ((insn
>> 5) & 0x18) + 8;
5765 if (insn
& (1 << 6)) {
5766 tmp
= neon_load_reg(rd
, 0);
5768 tmp
= tcg_temp_new_i32();
5769 tcg_gen_movi_i32(tmp
, 0);
5771 tmp2
= neon_load_reg(rm
, 0);
5772 tmp4
= tcg_const_i32(rn
);
5773 tmp5
= tcg_const_i32(n
);
5774 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
5775 tcg_temp_free_i32(tmp
);
5776 if (insn
& (1 << 6)) {
5777 tmp
= neon_load_reg(rd
, 1);
5779 tmp
= tcg_temp_new_i32();
5780 tcg_gen_movi_i32(tmp
, 0);
5782 tmp3
= neon_load_reg(rm
, 1);
5783 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
5784 tcg_temp_free_i32(tmp5
);
5785 tcg_temp_free_i32(tmp4
);
5786 neon_store_reg(rd
, 0, tmp2
);
5787 neon_store_reg(rd
, 1, tmp3
);
5788 tcg_temp_free_i32(tmp
);
5789 } else if ((insn
& 0x380) == 0) {
5791 if (insn
& (1 << 19)) {
5792 tmp
= neon_load_reg(rm
, 1);
5794 tmp
= neon_load_reg(rm
, 0);
5796 if (insn
& (1 << 16)) {
5797 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
5798 } else if (insn
& (1 << 17)) {
5799 if ((insn
>> 18) & 1)
5800 gen_neon_dup_high16(tmp
);
5802 gen_neon_dup_low16(tmp
);
5804 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5805 tmp2
= tcg_temp_new_i32();
5806 tcg_gen_mov_i32(tmp2
, tmp
);
5807 neon_store_reg(rd
, pass
, tmp2
);
5809 tcg_temp_free_i32(tmp
);
static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
        switch (crm) {
        case 0:
            /* DBGDIDR: just RAZ. In particular this means the
             * "debug architecture version" bits will read as
             * a reserved value, which should cause Linux to
             * not try to use the debug hardware.
             */
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
            return 0;
        case 1:
        case 2:
            /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
             * don't implement memory mapped debug components
             */
            if (ENABLE_ARCH_7) {
                tmp = tcg_const_i32(0);
                store_reg(s, rt, tmp);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this. */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor. See if the board has hooked it. */
        return disas_cp_insn (env, s, insn);
    }
}
/* Store a 64-bit value to a register pair. Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits. */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
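/* Usage note: gen_addq_lo() and gen_addq() above are the 64-bit accumulate
   steps used by the long-multiply paths further down (UMAAL, UMLAL/SMLAL and
   the Thumb-2 64-bit multiply-accumulate cases), which build the result in a
   TCGv_i64 and then write it back to a register pair with gen_storeq_reg(). */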
/* Set N and Z flags from a 64-bit value. */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
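/* Informal sketch of the scheme, expressed in terms of the cpu_exclusive_*
   globals declared at the top of this file (not the emitted TCG ops
   themselves):

     LDREX Rt, [Rn]:      cpu_exclusive_addr = Rn; cpu_exclusive_val = [Rn]; Rt = [Rn];
     STREX Rd, Rt, [Rn]:  if (Rn == cpu_exclusive_addr && [Rn] == cpu_exclusive_val)
                              { [Rn] = Rt; Rd = 0; } else { Rd = 1; }
     CLREX:               cpu_exclusive_addr = -1;
 */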
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0: tmp = gen_ld8u(addr, IS_USER(s)); break;
    case 1: tmp = gen_ld16u(addr, IS_USER(s)); break;
    case 2:
    case 3: tmp = gen_ld32(addr, IS_USER(s)); break;
    default: abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0: tmp = gen_ld8u(addr, IS_USER(s)); break;
    case 1: tmp = gen_ld16u(addr, IS_USER(s)); break;
    case 2:
    case 3: tmp = gen_ld32(addr, IS_USER(s)); break;
    default: abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0: gen_st8(tmp, addr, IS_USER(s)); break;
    case 1: gen_st16(tmp, addr, IS_USER(s)); break;
    case 2:
    case 3: gen_st32(tmp, addr, IS_USER(s)); break;
    default: abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
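/* These helpers are invoked from both the ARM decoder (the ldrex/strex cases
   in disas_arm_insn below) and the Thumb-2 decoder, with the size argument
   selecting the byte, halfword, word or doubleword variant. */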
6131 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
6133 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6140 insn
= ldl_code(s
->pc
);
6143 /* M variants do not implement ARM mode. */
6148 /* Unconditional instructions. */
6149 if (((insn
>> 25) & 7) == 1) {
6150 /* NEON Data processing. */
6151 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6154 if (disas_neon_data_insn(env
, s
, insn
))
6158 if ((insn
& 0x0f100000) == 0x04000000) {
6159 /* NEON load/store. */
6160 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6163 if (disas_neon_ls_insn(env
, s
, insn
))
6167 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6168 ((insn
& 0x0f30f010) == 0x0710f000)) {
6169 if ((insn
& (1 << 22)) == 0) {
6171 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6175 /* Otherwise PLD; v5TE+ */
6178 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6179 ((insn
& 0x0f70f010) == 0x0650f000)) {
6181 return; /* PLI; V7 */
6183 if (((insn
& 0x0f700000) == 0x04100000) ||
6184 ((insn
& 0x0f700010) == 0x06100000)) {
6185 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6188 return; /* v7MP: Unallocated memory hint: must NOP */
6191 if ((insn
& 0x0ffffdff) == 0x01010000) {
6194 if (insn
& (1 << 9)) {
6195 /* BE8 mode not implemented. */
6199 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6200 switch ((insn
>> 4) & 0xf) {
6209 /* We don't emulate caches so these are a no-op. */
6214 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6220 op1
= (insn
& 0x1f);
6221 addr
= tcg_temp_new_i32();
6222 tmp
= tcg_const_i32(op1
);
6223 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6224 tcg_temp_free_i32(tmp
);
6225 i
= (insn
>> 23) & 3;
6227 case 0: offset
= -4; break; /* DA */
6228 case 1: offset
= 0; break; /* IA */
6229 case 2: offset
= -8; break; /* DB */
6230 case 3: offset
= 4; break; /* IB */
6234 tcg_gen_addi_i32(addr
, addr
, offset
);
6235 tmp
= load_reg(s
, 14);
6236 gen_st32(tmp
, addr
, 0);
6237 tmp
= load_cpu_field(spsr
);
6238 tcg_gen_addi_i32(addr
, addr
, 4);
6239 gen_st32(tmp
, addr
, 0);
6240 if (insn
& (1 << 21)) {
6241 /* Base writeback. */
6243 case 0: offset
= -8; break;
6244 case 1: offset
= 4; break;
6245 case 2: offset
= -4; break;
6246 case 3: offset
= 0; break;
6250 tcg_gen_addi_i32(addr
, addr
, offset
);
6251 tmp
= tcg_const_i32(op1
);
6252 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6253 tcg_temp_free_i32(tmp
);
6254 tcg_temp_free_i32(addr
);
6256 tcg_temp_free_i32(addr
);
6259 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6265 rn
= (insn
>> 16) & 0xf;
6266 addr
= load_reg(s
, rn
);
6267 i
= (insn
>> 23) & 3;
6269 case 0: offset
= -4; break; /* DA */
6270 case 1: offset
= 0; break; /* IA */
6271 case 2: offset
= -8; break; /* DB */
6272 case 3: offset
= 4; break; /* IB */
6276 tcg_gen_addi_i32(addr
, addr
, offset
);
6277 /* Load PC into tmp and CPSR into tmp2. */
6278 tmp
= gen_ld32(addr
, 0);
6279 tcg_gen_addi_i32(addr
, addr
, 4);
6280 tmp2
= gen_ld32(addr
, 0);
6281 if (insn
& (1 << 21)) {
6282 /* Base writeback. */
6284 case 0: offset
= -8; break;
6285 case 1: offset
= 4; break;
6286 case 2: offset
= -4; break;
6287 case 3: offset
= 0; break;
6291 tcg_gen_addi_i32(addr
, addr
, offset
);
6292 store_reg(s
, rn
, addr
);
6294 tcg_temp_free_i32(addr
);
6296 gen_rfe(s
, tmp
, tmp2
);
6298 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6299 /* branch link and change to thumb (blx <offset>) */
6302 val
= (uint32_t)s
->pc
;
6303 tmp
= tcg_temp_new_i32();
6304 tcg_gen_movi_i32(tmp
, val
);
6305 store_reg(s
, 14, tmp
);
6306 /* Sign-extend the 24-bit offset */
6307 offset
= (((int32_t)insn
) << 8) >> 8;
6308 /* offset * 4 + bit24 * 2 + (thumb bit) */
6309 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6310 /* pipeline offset */
6314 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6315 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6316 /* iWMMXt register transfer. */
6317 if (env
->cp15
.c15_cpar
& (1 << 1))
6318 if (!disas_iwmmxt_insn(env
, s
, insn
))
6321 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6322 /* Coprocessor double register transfer. */
6323 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6324 /* Additional coprocessor register transfer. */
6325 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6328 /* cps (privileged) */
6332 if (insn
& (1 << 19)) {
6333 if (insn
& (1 << 8))
6335 if (insn
& (1 << 7))
6337 if (insn
& (1 << 6))
6339 if (insn
& (1 << 18))
6342 if (insn
& (1 << 17)) {
6344 val
|= (insn
& 0x1f);
6347 gen_set_psr_im(s
, mask
, 0, val
);
6354 /* if not always execute, we generate a conditional jump to
6356 s
->condlabel
= gen_new_label();
6357 gen_test_cc(cond
^ 1, s
->condlabel
);
6360 if ((insn
& 0x0f900000) == 0x03000000) {
6361 if ((insn
& (1 << 21)) == 0) {
6363 rd
= (insn
>> 12) & 0xf;
6364 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6365 if ((insn
& (1 << 22)) == 0) {
6367 tmp
= tcg_temp_new_i32();
6368 tcg_gen_movi_i32(tmp
, val
);
6371 tmp
= load_reg(s
, rd
);
6372 tcg_gen_ext16u_i32(tmp
, tmp
);
6373 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6375 store_reg(s
, rd
, tmp
);
6377 if (((insn
>> 12) & 0xf) != 0xf)
6379 if (((insn
>> 16) & 0xf) == 0) {
6380 gen_nop_hint(s
, insn
& 0xff);
6382 /* CPSR = immediate */
6384 shift
= ((insn
>> 8) & 0xf) * 2;
6386 val
= (val
>> shift
) | (val
<< (32 - shift
));
6387 i
= ((insn
& (1 << 22)) != 0);
6388 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6392 } else if ((insn
& 0x0f900000) == 0x01000000
6393 && (insn
& 0x00000090) != 0x00000090) {
6394 /* miscellaneous instructions */
6395 op1
= (insn
>> 21) & 3;
6396 sh
= (insn
>> 4) & 0xf;
6399 case 0x0: /* move program status register */
6402 tmp
= load_reg(s
, rm
);
6403 i
= ((op1
& 2) != 0);
6404 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6408 rd
= (insn
>> 12) & 0xf;
6412 tmp
= load_cpu_field(spsr
);
6414 tmp
= tcg_temp_new_i32();
6415 gen_helper_cpsr_read(tmp
);
6417 store_reg(s
, rd
, tmp
);
6422 /* branch/exchange thumb (bx). */
6423 tmp
= load_reg(s
, rm
);
6425 } else if (op1
== 3) {
6427 rd
= (insn
>> 12) & 0xf;
6428 tmp
= load_reg(s
, rm
);
6429 gen_helper_clz(tmp
, tmp
);
6430 store_reg(s
, rd
, tmp
);
6438 /* Trivial implementation equivalent to bx. */
6439 tmp
= load_reg(s
, rm
);
6449 /* branch link/exchange thumb (blx) */
6450 tmp
= load_reg(s
, rm
);
6451 tmp2
= tcg_temp_new_i32();
6452 tcg_gen_movi_i32(tmp2
, s
->pc
);
6453 store_reg(s
, 14, tmp2
);
6456 case 0x5: /* saturating add/subtract */
6457 rd
= (insn
>> 12) & 0xf;
6458 rn
= (insn
>> 16) & 0xf;
6459 tmp
= load_reg(s
, rm
);
6460 tmp2
= load_reg(s
, rn
);
6462 gen_helper_double_saturate(tmp2
, tmp2
);
6464 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6466 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6467 tcg_temp_free_i32(tmp2
);
6468 store_reg(s
, rd
, tmp
);
6471 /* SMC instruction (op1 == 3)
6472 and undefined instructions (op1 == 0 || op1 == 2)
6478 gen_exception_insn(s
, 4, EXCP_BKPT
);
6480 case 0x8: /* signed multiply */
6484 rs
= (insn
>> 8) & 0xf;
6485 rn
= (insn
>> 12) & 0xf;
6486 rd
= (insn
>> 16) & 0xf;
6488 /* (32 * 16) >> 16 */
6489 tmp
= load_reg(s
, rm
);
6490 tmp2
= load_reg(s
, rs
);
6492 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6495 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6496 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6497 tmp
= tcg_temp_new_i32();
6498 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6499 tcg_temp_free_i64(tmp64
);
6500 if ((sh
& 2) == 0) {
6501 tmp2
= load_reg(s
, rn
);
6502 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6503 tcg_temp_free_i32(tmp2
);
6505 store_reg(s
, rd
, tmp
);
6508 tmp
= load_reg(s
, rm
);
6509 tmp2
= load_reg(s
, rs
);
6510 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6511 tcg_temp_free_i32(tmp2
);
6513 tmp64
= tcg_temp_new_i64();
6514 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6515 tcg_temp_free_i32(tmp
);
6516 gen_addq(s
, tmp64
, rn
, rd
);
6517 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6518 tcg_temp_free_i64(tmp64
);
6521 tmp2
= load_reg(s
, rn
);
6522 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6523 tcg_temp_free_i32(tmp2
);
6525 store_reg(s
, rd
, tmp
);
6532 } else if (((insn
& 0x0e000000) == 0 &&
6533 (insn
& 0x00000090) != 0x90) ||
6534 ((insn
& 0x0e000000) == (1 << 25))) {
6535 int set_cc
, logic_cc
, shiftop
;
6537 op1
= (insn
>> 21) & 0xf;
6538 set_cc
= (insn
>> 20) & 1;
6539 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6541 /* data processing instruction */
6542 if (insn
& (1 << 25)) {
6543 /* immediate operand */
6545 shift
= ((insn
>> 8) & 0xf) * 2;
6547 val
= (val
>> shift
) | (val
<< (32 - shift
));
6549 tmp2
= tcg_temp_new_i32();
6550 tcg_gen_movi_i32(tmp2
, val
);
6551 if (logic_cc
&& shift
) {
6552 gen_set_CF_bit31(tmp2
);
6557 tmp2
= load_reg(s
, rm
);
6558 shiftop
= (insn
>> 5) & 3;
6559 if (!(insn
& (1 << 4))) {
6560 shift
= (insn
>> 7) & 0x1f;
6561 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6563 rs
= (insn
>> 8) & 0xf;
6564 tmp
= load_reg(s
, rs
);
6565 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6568 if (op1
!= 0x0f && op1
!= 0x0d) {
6569 rn
= (insn
>> 16) & 0xf;
6570 tmp
= load_reg(s
, rn
);
6574 rd
= (insn
>> 12) & 0xf;
6577 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6581 store_reg_bx(env
, s
, rd
, tmp
);
6584 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6588 store_reg_bx(env
, s
, rd
, tmp
);
6591 if (set_cc
&& rd
== 15) {
6592 /* SUBS r15, ... is used for exception return. */
6596 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6597 gen_exception_return(s
, tmp
);
6600 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6602 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6604 store_reg_bx(env
, s
, rd
, tmp
);
6609 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6611 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6613 store_reg_bx(env
, s
, rd
, tmp
);
6617 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6619 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6621 store_reg_bx(env
, s
, rd
, tmp
);
6625 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6627 gen_add_carry(tmp
, tmp
, tmp2
);
6629 store_reg_bx(env
, s
, rd
, tmp
);
6633 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6635 gen_sub_carry(tmp
, tmp
, tmp2
);
6637 store_reg_bx(env
, s
, rd
, tmp
);
6641 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6643 gen_sub_carry(tmp
, tmp2
, tmp
);
6645 store_reg_bx(env
, s
, rd
, tmp
);
6649 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6652 tcg_temp_free_i32(tmp
);
6656 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6659 tcg_temp_free_i32(tmp
);
6663 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6665 tcg_temp_free_i32(tmp
);
6669 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6671 tcg_temp_free_i32(tmp
);
6674 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6678 store_reg_bx(env
, s
, rd
, tmp
);
6681 if (logic_cc
&& rd
== 15) {
6682 /* MOVS r15, ... is used for exception return. */
6686 gen_exception_return(s
, tmp2
);
6691 store_reg_bx(env
, s
, rd
, tmp2
);
6695 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6699 store_reg_bx(env
, s
, rd
, tmp
);
6703 tcg_gen_not_i32(tmp2
, tmp2
);
6707 store_reg_bx(env
, s
, rd
, tmp2
);
6710 if (op1
!= 0x0f && op1
!= 0x0d) {
6711 tcg_temp_free_i32(tmp2
);
6714 /* other instructions */
6715 op1
= (insn
>> 24) & 0xf;
6719 /* multiplies, extra load/stores */
6720 sh
= (insn
>> 5) & 3;
6723 rd
= (insn
>> 16) & 0xf;
6724 rn
= (insn
>> 12) & 0xf;
6725 rs
= (insn
>> 8) & 0xf;
6727 op1
= (insn
>> 20) & 0xf;
6729 case 0: case 1: case 2: case 3: case 6:
6731 tmp
= load_reg(s
, rs
);
6732 tmp2
= load_reg(s
, rm
);
6733 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6734 tcg_temp_free_i32(tmp2
);
6735 if (insn
& (1 << 22)) {
6736 /* Subtract (mls) */
6738 tmp2
= load_reg(s
, rn
);
6739 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6740 tcg_temp_free_i32(tmp2
);
6741 } else if (insn
& (1 << 21)) {
6743 tmp2
= load_reg(s
, rn
);
6744 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6745 tcg_temp_free_i32(tmp2
);
6747 if (insn
& (1 << 20))
6749 store_reg(s
, rd
, tmp
);
6752 /* 64 bit mul double accumulate (UMAAL) */
6754 tmp
= load_reg(s
, rs
);
6755 tmp2
= load_reg(s
, rm
);
6756 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6757 gen_addq_lo(s
, tmp64
, rn
);
6758 gen_addq_lo(s
, tmp64
, rd
);
6759 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6760 tcg_temp_free_i64(tmp64
);
6762 case 8: case 9: case 10: case 11:
6763 case 12: case 13: case 14: case 15:
6764 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6765 tmp
= load_reg(s
, rs
);
6766 tmp2
= load_reg(s
, rm
);
6767 if (insn
& (1 << 22)) {
6768 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6770 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6772 if (insn
& (1 << 21)) { /* mult accumulate */
6773 gen_addq(s
, tmp64
, rn
, rd
);
6775 if (insn
& (1 << 20)) {
6776 gen_logicq_cc(tmp64
);
6778 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6779 tcg_temp_free_i64(tmp64
);
6785 rn
= (insn
>> 16) & 0xf;
6786 rd
= (insn
>> 12) & 0xf;
6787 if (insn
& (1 << 23)) {
6788 /* load/store exclusive */
6789 op1
= (insn
>> 21) & 0x3;
6794 addr
= tcg_temp_local_new_i32();
6795 load_reg_var(s
, addr
, rn
);
6796 if (insn
& (1 << 20)) {
6799 gen_load_exclusive(s
, rd
, 15, addr
, 2);
6801 case 1: /* ldrexd */
6802 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
6804 case 2: /* ldrexb */
6805 gen_load_exclusive(s
, rd
, 15, addr
, 0);
6807 case 3: /* ldrexh */
6808 gen_load_exclusive(s
, rd
, 15, addr
, 1);
6817 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
6819 case 1: /* strexd */
6820 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
6822 case 2: /* strexb */
6823 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
6825 case 3: /* strexh */
6826 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
6832 tcg_temp_free(addr
);
6834 /* SWP instruction */
6837 /* ??? This is not really atomic. However we know
6838 we never have multiple CPUs running in parallel,
6839 so it is good enough. */
6840 addr
= load_reg(s
, rn
);
6841 tmp
= load_reg(s
, rm
);
6842 if (insn
& (1 << 22)) {
6843 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6844 gen_st8(tmp
, addr
, IS_USER(s
));
6846 tmp2
= gen_ld32(addr
, IS_USER(s
));
6847 gen_st32(tmp
, addr
, IS_USER(s
));
6849 tcg_temp_free_i32(addr
);
6850 store_reg(s
, rd
, tmp2
);
6856 /* Misc load/store */
6857 rn
= (insn
>> 16) & 0xf;
6858 rd
= (insn
>> 12) & 0xf;
6859 addr
= load_reg(s
, rn
);
6860 if (insn
& (1 << 24))
6861 gen_add_datah_offset(s
, insn
, 0, addr
);
6863 if (insn
& (1 << 20)) {
6867 tmp
= gen_ld16u(addr
, IS_USER(s
));
6870 tmp
= gen_ld8s(addr
, IS_USER(s
));
6874 tmp
= gen_ld16s(addr
, IS_USER(s
));
6878 } else if (sh
& 2) {
6882 tmp
= load_reg(s
, rd
);
6883 gen_st32(tmp
, addr
, IS_USER(s
));
6884 tcg_gen_addi_i32(addr
, addr
, 4);
6885 tmp
= load_reg(s
, rd
+ 1);
6886 gen_st32(tmp
, addr
, IS_USER(s
));
6890 tmp
= gen_ld32(addr
, IS_USER(s
));
6891 store_reg(s
, rd
, tmp
);
6892 tcg_gen_addi_i32(addr
, addr
, 4);
6893 tmp
= gen_ld32(addr
, IS_USER(s
));
6897 address_offset
= -4;
6900 tmp
= load_reg(s
, rd
);
6901 gen_st16(tmp
, addr
, IS_USER(s
));
6904 /* Perform base writeback before the loaded value to
6905 ensure correct behavior with overlapping index registers.
6906 ldrd with base writeback is undefined if the
6907 destination and index registers overlap. */
6908 if (!(insn
& (1 << 24))) {
6909 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6910 store_reg(s
, rn
, addr
);
6911 } else if (insn
& (1 << 21)) {
6913 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6914 store_reg(s
, rn
, addr
);
6916 tcg_temp_free_i32(addr
);
6919 /* Complete the load. */
6920 store_reg(s
, rd
, tmp
);
6929 if (insn
& (1 << 4)) {
6931 /* Armv6 Media instructions. */
6933 rn
= (insn
>> 16) & 0xf;
6934 rd
= (insn
>> 12) & 0xf;
6935 rs
= (insn
>> 8) & 0xf;
6936 switch ((insn
>> 23) & 3) {
6937 case 0: /* Parallel add/subtract. */
6938 op1
= (insn
>> 20) & 7;
6939 tmp
= load_reg(s
, rn
);
6940 tmp2
= load_reg(s
, rm
);
6941 sh
= (insn
>> 5) & 7;
6942 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6944 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6945 tcg_temp_free_i32(tmp2
);
6946 store_reg(s
, rd
, tmp
);
6949 if ((insn
& 0x00700020) == 0) {
6950 /* Halfword pack. */
6951 tmp
= load_reg(s
, rn
);
6952 tmp2
= load_reg(s
, rm
);
6953 shift
= (insn
>> 7) & 0x1f;
6954 if (insn
& (1 << 6)) {
6958 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6959 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6960 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6964 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6965 tcg_gen_ext16u_i32(tmp
, tmp
);
6966 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6968 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6969 tcg_temp_free_i32(tmp2
);
6970 store_reg(s
, rd
, tmp
);
6971 } else if ((insn
& 0x00200020) == 0x00200000) {
6973 tmp
= load_reg(s
, rm
);
6974 shift
= (insn
>> 7) & 0x1f;
6975 if (insn
& (1 << 6)) {
6978 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6980 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6982 sh
= (insn
>> 16) & 0x1f;
6983 tmp2
= tcg_const_i32(sh
);
6984 if (insn
& (1 << 22))
6985 gen_helper_usat(tmp
, tmp
, tmp2
);
6987 gen_helper_ssat(tmp
, tmp
, tmp2
);
6988 tcg_temp_free_i32(tmp2
);
6989 store_reg(s
, rd
, tmp
);
6990 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6992 tmp
= load_reg(s
, rm
);
6993 sh
= (insn
>> 16) & 0x1f;
6994 tmp2
= tcg_const_i32(sh
);
6995 if (insn
& (1 << 22))
6996 gen_helper_usat16(tmp
, tmp
, tmp2
);
6998 gen_helper_ssat16(tmp
, tmp
, tmp2
);
6999 tcg_temp_free_i32(tmp2
);
7000 store_reg(s
, rd
, tmp
);
7001 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7003 tmp
= load_reg(s
, rn
);
7004 tmp2
= load_reg(s
, rm
);
7005 tmp3
= tcg_temp_new_i32();
7006 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7007 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7008 tcg_temp_free_i32(tmp3
);
7009 tcg_temp_free_i32(tmp2
);
7010 store_reg(s
, rd
, tmp
);
7011 } else if ((insn
& 0x000003e0) == 0x00000060) {
7012 tmp
= load_reg(s
, rm
);
7013 shift
= (insn
>> 10) & 3;
7014 /* ??? In many cases it's not necessary to do a
7015 rotate, a shift is sufficient. */
7017 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7018 op1
= (insn
>> 20) & 7;
7020 case 0: gen_sxtb16(tmp
); break;
7021 case 2: gen_sxtb(tmp
); break;
7022 case 3: gen_sxth(tmp
); break;
7023 case 4: gen_uxtb16(tmp
); break;
7024 case 6: gen_uxtb(tmp
); break;
7025 case 7: gen_uxth(tmp
); break;
7026 default: goto illegal_op
;
7029 tmp2
= load_reg(s
, rn
);
7030 if ((op1
& 3) == 0) {
7031 gen_add16(tmp
, tmp2
);
7033 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7034 tcg_temp_free_i32(tmp2
);
7037 store_reg(s
, rd
, tmp
);
7038 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7040 tmp
= load_reg(s
, rm
);
7041 if (insn
& (1 << 22)) {
7042 if (insn
& (1 << 7)) {
7046 gen_helper_rbit(tmp
, tmp
);
7049 if (insn
& (1 << 7))
7052 tcg_gen_bswap32_i32(tmp
, tmp
);
7054 store_reg(s
, rd
, tmp
);
7059 case 2: /* Multiplies (Type 3). */
7060 tmp
= load_reg(s
, rm
);
7061 tmp2
= load_reg(s
, rs
);
7062 if (insn
& (1 << 20)) {
7063 /* Signed multiply most significant [accumulate].
7064 (SMMUL, SMMLA, SMMLS) */
7065 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7068 tmp
= load_reg(s
, rd
);
7069 if (insn
& (1 << 6)) {
7070 tmp64
= gen_subq_msw(tmp64
, tmp
);
7072 tmp64
= gen_addq_msw(tmp64
, tmp
);
7075 if (insn
& (1 << 5)) {
7076 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7078 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
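/* Bit 5 here selects the rounding forms (SMMULR/SMMLAR/SMMLSR): adding
   0x80000000 before taking the high 32 bits rounds the result to nearest
   rather than truncating towards zero. */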
7079 tmp
= tcg_temp_new_i32();
7080 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7081 tcg_temp_free_i64(tmp64
);
7082 store_reg(s
, rn
, tmp
);
7084 if (insn
& (1 << 5))
7085 gen_swap_half(tmp2
);
7086 gen_smul_dual(tmp
, tmp2
);
7087 if (insn
& (1 << 6)) {
7088 /* This subtraction cannot overflow. */
7089 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7091 /* This addition cannot overflow 32 bits;
7092 * however it may overflow considered as a signed
7093 * operation, in which case we must set the Q flag.
7095 gen_helper_add_setq(tmp
, tmp
, tmp2
);
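/* Worked example of the signed-overflow case mentioned above: a dual multiply
   with all four 16-bit operands equal to 0x8000 gives
   0x40000000 + 0x40000000 = 0x80000000, which fits in 32 bits unsigned but
   overflows as a signed addition, so the sticky saturation (Q) flag must be
   set. */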
7097 tcg_temp_free_i32(tmp2
);
7098 if (insn
& (1 << 22)) {
7099 /* smlald, smlsld */
7100 tmp64
= tcg_temp_new_i64();
7101 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7102 tcg_temp_free_i32(tmp
);
7103 gen_addq(s
, tmp64
, rd
, rn
);
7104 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7105 tcg_temp_free_i64(tmp64
);
7107 /* smuad, smusd, smlad, smlsd */
7110 tmp2
= load_reg(s
, rd
);
7111 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7112 tcg_temp_free_i32(tmp2
);
7114 store_reg(s
, rn
, tmp
);
7119 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7121 case 0: /* Unsigned sum of absolute differences. */
7123 tmp
= load_reg(s
, rm
);
7124 tmp2
= load_reg(s
, rs
);
7125 gen_helper_usad8(tmp
, tmp
, tmp2
);
7126 tcg_temp_free_i32(tmp2
);
7128 tmp2
= load_reg(s
, rd
);
7129 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7130 tcg_temp_free_i32(tmp2
);
7132 store_reg(s
, rn
, tmp
);
7134 case 0x20: case 0x24: case 0x28: case 0x2c:
7135 /* Bitfield insert/clear. */
7137 shift
= (insn
>> 7) & 0x1f;
7138 i
= (insn
>> 16) & 0x1f;
7141 tmp
= tcg_temp_new_i32();
7142 tcg_gen_movi_i32(tmp
, 0);
7144 tmp
= load_reg(s
, rm
);
7147 tmp2
= load_reg(s
, rd
);
7148 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7149 tcg_temp_free_i32(tmp2
);
7151 store_reg(s
, rd
, tmp
);
7153 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7154 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7156 tmp
= load_reg(s
, rm
);
7157 shift
= (insn
>> 7) & 0x1f;
7158 i
= ((insn
>> 16) & 0x1f) + 1;
7163 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7165 gen_sbfx(tmp
, shift
, i
);
7168 store_reg(s
, rd
, tmp
);
7178 /* Check for undefined extension instructions
7179 * per the ARM Bible IE:
7180 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7182 sh
= (0xf << 20) | (0xf << 4);
7183 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7187 /* load/store byte/word */
7188 rn
= (insn
>> 16) & 0xf;
7189 rd
= (insn
>> 12) & 0xf;
7190 tmp2
= load_reg(s
, rn
);
7191 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7192 if (insn
& (1 << 24))
7193 gen_add_data_offset(s
, insn
, tmp2
);
7194 if (insn
& (1 << 20)) {
7196 if (insn
& (1 << 22)) {
7197 tmp
= gen_ld8u(tmp2
, i
);
7199 tmp
= gen_ld32(tmp2
, i
);
7203 tmp
= load_reg(s
, rd
);
7204 if (insn
& (1 << 22))
7205 gen_st8(tmp
, tmp2
, i
);
7207 gen_st32(tmp
, tmp2
, i
);
7209 if (!(insn
& (1 << 24))) {
7210 gen_add_data_offset(s
, insn
, tmp2
);
7211 store_reg(s
, rn
, tmp2
);
7212 } else if (insn
& (1 << 21)) {
7213 store_reg(s
, rn
, tmp2
);
7215 tcg_temp_free_i32(tmp2
);
7217 if (insn
& (1 << 20)) {
7218 /* Complete the load. */
7222 store_reg(s
, rd
, tmp
);
7228 int j
, n
, user
, loaded_base
;
7230 /* load/store multiple words */
7231 /* XXX: store correct base if write back */
7233 if (insn
& (1 << 22)) {
7235 goto illegal_op
; /* only usable in supervisor mode */
7237 if ((insn
& (1 << 15)) == 0)
7240 rn
= (insn
>> 16) & 0xf;
7241 addr
= load_reg(s
, rn
);
7243 /* compute total size */
7245 TCGV_UNUSED(loaded_var
);
7248 if (insn
& (1 << i
))
7251 /* XXX: test invalid n == 0 case ? */
7252 if (insn
& (1 << 23)) {
7253 if (insn
& (1 << 24)) {
7255 tcg_gen_addi_i32(addr
, addr
, 4);
7257 /* post increment */
7260 if (insn
& (1 << 24)) {
7262 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7264 /* post decrement */
7266 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7271 if (insn
& (1 << i
)) {
7272 if (insn
& (1 << 20)) {
7274 tmp
= gen_ld32(addr
, IS_USER(s
));
7278 tmp2
= tcg_const_i32(i
);
7279 gen_helper_set_user_reg(tmp2
, tmp
);
7280 tcg_temp_free_i32(tmp2
);
7281 tcg_temp_free_i32(tmp
);
7282 } else if (i
== rn
) {
7286 store_reg(s
, i
, tmp
);
7291 /* special case: r15 = PC + 8 */
7292 val
= (long)s
->pc
+ 4;
7293 tmp
= tcg_temp_new_i32();
7294 tcg_gen_movi_i32(tmp
, val
);
7296 tmp
= tcg_temp_new_i32();
7297 tmp2
= tcg_const_i32(i
);
7298 gen_helper_get_user_reg(tmp
, tmp2
);
7299 tcg_temp_free_i32(tmp2
);
7301 tmp
= load_reg(s
, i
);
7303 gen_st32(tmp
, addr
, IS_USER(s
));
7306 /* no need to add after the last transfer */
7308 tcg_gen_addi_i32(addr
, addr
, 4);
7311 if (insn
& (1 << 21)) {
7313 if (insn
& (1 << 23)) {
7314 if (insn
& (1 << 24)) {
7317 /* post increment */
7318 tcg_gen_addi_i32(addr
, addr
, 4);
7321 if (insn
& (1 << 24)) {
7324 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7326 /* post decrement */
7327 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7330 store_reg(s
, rn
, addr
);
7332 tcg_temp_free_i32(addr
);
7335 store_reg(s
, rn
, loaded_var
);
7337 if ((insn
& (1 << 22)) && !user
) {
7338 /* Restore CPSR from SPSR. */
7339 tmp
= load_cpu_field(spsr
);
7340 gen_set_cpsr(tmp
, 0xffffffff);
7341 tcg_temp_free_i32(tmp
);
7342 s
->is_jmp
= DISAS_UPDATE
;
7351 /* branch (and link) */
7352 val
= (int32_t)s
->pc
;
7353 if (insn
& (1 << 24)) {
7354 tmp
= tcg_temp_new_i32();
7355 tcg_gen_movi_i32(tmp
, val
);
7356 store_reg(s
, 14, tmp
);
7358 offset
= (((int32_t)insn
<< 8) >> 8);
7359 val
+= (offset
<< 2) + 4;
7367 if (disas_coproc_insn(env
, s
, insn
))
7372 gen_set_pc_im(s
->pc
);
7373 s
->is_jmp
= DISAS_SWI
;
7377 gen_exception_insn(s
, 4, EXCP_UDEF
);
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7404 tcg_gen_and_i32(t0
, t0
, t1
);
7408 tcg_gen_andc_i32(t0
, t0
, t1
);
7412 tcg_gen_or_i32(t0
, t0
, t1
);
7416 tcg_gen_orc_i32(t0
, t0
, t1
);
7420 tcg_gen_xor_i32(t0
, t0
, t1
);
7425 gen_helper_add_cc(t0
, t0
, t1
);
7427 tcg_gen_add_i32(t0
, t0
, t1
);
7431 gen_helper_adc_cc(t0
, t0
, t1
);
7437 gen_helper_sbc_cc(t0
, t0
, t1
);
7439 gen_sub_carry(t0
, t0
, t1
);
7443 gen_helper_sub_cc(t0
, t0
, t1
);
7445 tcg_gen_sub_i32(t0
, t0
, t1
);
7449 gen_helper_sub_cc(t0
, t1
, t0
);
7451 tcg_gen_sub_i32(t0
, t1
, t0
);
7453 default: /* 5, 6, 7, 9, 12, 15. */
7459 gen_set_CF_bit31(t1
);
7464 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7466 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7468 uint32_t insn
, imm
, shift
, offset
;
7469 uint32_t rd
, rn
, rm
, rs
;
7480 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7481 || arm_feature (env
, ARM_FEATURE_M
))) {
7482 /* Thumb-1 cores may need to treat bl and blx as a pair of
7483 16-bit instructions to get correct prefetch abort behavior. */
7485 if ((insn
& (1 << 12)) == 0) {
7486 /* Second half of blx. */
7487 offset
= ((insn
& 0x7ff) << 1);
7488 tmp
= load_reg(s
, 14);
7489 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7490 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7492 tmp2
= tcg_temp_new_i32();
7493 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7494 store_reg(s
, 14, tmp2
);
7498 if (insn
& (1 << 11)) {
7499 /* Second half of bl. */
7500 offset
= ((insn
& 0x7ff) << 1) | 1;
7501 tmp
= load_reg(s
, 14);
7502 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7504 tmp2
= tcg_temp_new_i32();
7505 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7506 store_reg(s
, 14, tmp2
);
7510 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7511 /* Instruction spans a page boundary. Implement it as two
7512 16-bit instructions in case the second half causes an
7514 offset
= ((int32_t)insn
<< 21) >> 9;
7515 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7518 /* Fall through to 32-bit decode. */
7521 insn
= lduw_code(s
->pc
);
7523 insn
|= (uint32_t)insn_hw1
<< 16;
7525 if ((insn
& 0xf800e800) != 0xf000e800) {
7529 rn
= (insn
>> 16) & 0xf;
7530 rs
= (insn
>> 12) & 0xf;
7531 rd
= (insn
>> 8) & 0xf;
7533 switch ((insn
>> 25) & 0xf) {
7534 case 0: case 1: case 2: case 3:
7535 /* 16-bit instructions. Should never happen. */
7538 if (insn
& (1 << 22)) {
7539 /* Other load/store, table branch. */
7540 if (insn
& 0x01200000) {
7541 /* Load/store doubleword. */
7543 addr
= tcg_temp_new_i32();
7544 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7546 addr
= load_reg(s
, rn
);
7548 offset
= (insn
& 0xff) * 4;
7549 if ((insn
& (1 << 23)) == 0)
7551 if (insn
& (1 << 24)) {
7552 tcg_gen_addi_i32(addr
, addr
, offset
);
7555 if (insn
& (1 << 20)) {
7557 tmp
= gen_ld32(addr
, IS_USER(s
));
7558 store_reg(s
, rs
, tmp
);
7559 tcg_gen_addi_i32(addr
, addr
, 4);
7560 tmp
= gen_ld32(addr
, IS_USER(s
));
7561 store_reg(s
, rd
, tmp
);
7564 tmp
= load_reg(s
, rs
);
7565 gen_st32(tmp
, addr
, IS_USER(s
));
7566 tcg_gen_addi_i32(addr
, addr
, 4);
7567 tmp
= load_reg(s
, rd
);
7568 gen_st32(tmp
, addr
, IS_USER(s
));
7570 if (insn
& (1 << 21)) {
7571 /* Base writeback. */
7574 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7575 store_reg(s
, rn
, addr
);
7577 tcg_temp_free_i32(addr
);
7579 } else if ((insn
& (1 << 23)) == 0) {
7580 /* Load/store exclusive word. */
7581 addr
= tcg_temp_local_new();
7582 load_reg_var(s
, addr
, rn
);
7583 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7584 if (insn
& (1 << 20)) {
7585 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7587 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7589 tcg_temp_free(addr
);
7590 } else if ((insn
& (1 << 6)) == 0) {
7593 addr
= tcg_temp_new_i32();
7594 tcg_gen_movi_i32(addr
, s
->pc
);
7596 addr
= load_reg(s
, rn
);
7598 tmp
= load_reg(s
, rm
);
7599 tcg_gen_add_i32(addr
, addr
, tmp
);
7600 if (insn
& (1 << 4)) {
7602 tcg_gen_add_i32(addr
, addr
, tmp
);
7603 tcg_temp_free_i32(tmp
);
7604 tmp
= gen_ld16u(addr
, IS_USER(s
));
7606 tcg_temp_free_i32(tmp
);
7607 tmp
= gen_ld8u(addr
, IS_USER(s
));
7609 tcg_temp_free_i32(addr
);
7610 tcg_gen_shli_i32(tmp
, tmp
, 1);
7611 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7612 store_reg(s
, 15, tmp
);
7614 /* Load/store exclusive byte/halfword/doubleword. */
7616 op
= (insn
>> 4) & 0x3;
7620 addr
= tcg_temp_local_new();
7621 load_reg_var(s
, addr
, rn
);
7622 if (insn
& (1 << 20)) {
7623 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7625 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7627 tcg_temp_free(addr
);
7630 /* Load/store multiple, RFE, SRS. */
7631 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7632 /* Not available in user mode. */
7635 if (insn
& (1 << 20)) {
7637 addr
= load_reg(s
, rn
);
7638 if ((insn
& (1 << 24)) == 0)
7639 tcg_gen_addi_i32(addr
, addr
, -8);
7640 /* Load PC into tmp and CPSR into tmp2. */
7641 tmp
= gen_ld32(addr
, 0);
7642 tcg_gen_addi_i32(addr
, addr
, 4);
7643 tmp2
= gen_ld32(addr
, 0);
7644 if (insn
& (1 << 21)) {
7645 /* Base writeback. */
7646 if (insn
& (1 << 24)) {
7647 tcg_gen_addi_i32(addr
, addr
, 4);
7649 tcg_gen_addi_i32(addr
, addr
, -4);
7651 store_reg(s
, rn
, addr
);
7653 tcg_temp_free_i32(addr
);
7655 gen_rfe(s
, tmp
, tmp2
);
7659 addr
= tcg_temp_new_i32();
7660 tmp
= tcg_const_i32(op
);
7661 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7662 tcg_temp_free_i32(tmp
);
7663 if ((insn
& (1 << 24)) == 0) {
7664 tcg_gen_addi_i32(addr
, addr
, -8);
7666 tmp
= load_reg(s
, 14);
7667 gen_st32(tmp
, addr
, 0);
7668 tcg_gen_addi_i32(addr
, addr
, 4);
7669 tmp
= tcg_temp_new_i32();
7670 gen_helper_cpsr_read(tmp
);
7671 gen_st32(tmp
, addr
, 0);
7672 if (insn
& (1 << 21)) {
7673 if ((insn
& (1 << 24)) == 0) {
7674 tcg_gen_addi_i32(addr
, addr
, -4);
7676 tcg_gen_addi_i32(addr
, addr
, 4);
7678 tmp
= tcg_const_i32(op
);
7679 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7680 tcg_temp_free_i32(tmp
);
7682 tcg_temp_free_i32(addr
);
7687 /* Load/store multiple. */
7688 addr
= load_reg(s
, rn
);
7690 for (i
= 0; i
< 16; i
++) {
7691 if (insn
& (1 << i
))
7694 if (insn
& (1 << 24)) {
7695 tcg_gen_addi_i32(addr
, addr
, -offset
);
7698 for (i
= 0; i
< 16; i
++) {
7699 if ((insn
& (1 << i
)) == 0)
7701 if (insn
& (1 << 20)) {
7703 tmp
= gen_ld32(addr
, IS_USER(s
));
7707 store_reg(s
, i
, tmp
);
7711 tmp
= load_reg(s
, i
);
7712 gen_st32(tmp
, addr
, IS_USER(s
));
7714 tcg_gen_addi_i32(addr
, addr
, 4);
7716 if (insn
& (1 << 21)) {
7717 /* Base register writeback. */
7718 if (insn
& (1 << 24)) {
7719 tcg_gen_addi_i32(addr
, addr
, -offset
);
7721 /* Fault if writeback register is in register list. */
7722 if (insn
& (1 << rn
))
7724 store_reg(s
, rn
, addr
);
7726 tcg_temp_free_i32(addr
);
7733 op
= (insn
>> 21) & 0xf;
7735 /* Halfword pack. */
7736 tmp
= load_reg(s
, rn
);
7737 tmp2
= load_reg(s
, rm
);
7738 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
7739 if (insn
& (1 << 5)) {
7743 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7744 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7745 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7749 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7750 tcg_gen_ext16u_i32(tmp
, tmp
);
7751 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7753 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7754 tcg_temp_free_i32(tmp2
);
7755 store_reg(s
, rd
, tmp
);
7757 /* Data processing register constant shift. */
7759 tmp
= tcg_temp_new_i32();
7760 tcg_gen_movi_i32(tmp
, 0);
7762 tmp
= load_reg(s
, rn
);
7764 tmp2
= load_reg(s
, rm
);
7766 shiftop
= (insn
>> 4) & 3;
7767 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7768 conds
= (insn
& (1 << 20)) != 0;
7769 logic_cc
= (conds
&& thumb2_logic_op(op
));
7770 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7771 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
7773 tcg_temp_free_i32(tmp2
);
7775 store_reg(s
, rd
, tmp
);
7777 tcg_temp_free_i32(tmp
);
7781 case 13: /* Misc data processing. */
7782 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7783 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7786 case 0: /* Register controlled shift. */
7787 tmp
= load_reg(s
, rn
);
7788 tmp2
= load_reg(s
, rm
);
7789 if ((insn
& 0x70) != 0)
7791 op
= (insn
>> 21) & 3;
7792 logic_cc
= (insn
& (1 << 20)) != 0;
7793 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7796 store_reg_bx(env
, s
, rd
, tmp
);
7798 case 1: /* Sign/zero extend. */
7799 tmp
= load_reg(s
, rm
);
7800 shift
= (insn
>> 4) & 3;
7801 /* ??? In many cases it's not necessary to do a
7802 rotate, a shift is sufficient. */
7804 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7805 op
= (insn
>> 20) & 7;
7807 case 0: gen_sxth(tmp
); break;
7808 case 1: gen_uxth(tmp
); break;
7809 case 2: gen_sxtb16(tmp
); break;
7810 case 3: gen_uxtb16(tmp
); break;
7811 case 4: gen_sxtb(tmp
); break;
7812 case 5: gen_uxtb(tmp
); break;
7813 default: goto illegal_op
;
7816 tmp2
= load_reg(s
, rn
);
7817 if ((op
>> 1) == 1) {
7818 gen_add16(tmp
, tmp2
);
7820 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7821 tcg_temp_free_i32(tmp2
);
7824 store_reg(s
, rd
, tmp
);
7826 case 2: /* SIMD add/subtract. */
7827 op
= (insn
>> 20) & 7;
7828 shift
= (insn
>> 4) & 7;
7829 if ((op
& 3) == 3 || (shift
& 3) == 3)
7831 tmp
= load_reg(s
, rn
);
7832 tmp2
= load_reg(s
, rm
);
7833 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7834 tcg_temp_free_i32(tmp2
);
7835 store_reg(s
, rd
, tmp
);
7837 case 3: /* Other data processing. */
7838 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7840 /* Saturating add/subtract. */
7841 tmp
= load_reg(s
, rn
);
7842 tmp2
= load_reg(s
, rm
);
7844 gen_helper_double_saturate(tmp
, tmp
);
7846 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7848 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7849 tcg_temp_free_i32(tmp2
);
7851 tmp
= load_reg(s
, rn
);
7853 case 0x0a: /* rbit */
7854 gen_helper_rbit(tmp
, tmp
);
7856 case 0x08: /* rev */
7857 tcg_gen_bswap32_i32(tmp
, tmp
);
7859 case 0x09: /* rev16 */
7862 case 0x0b: /* revsh */
7865 case 0x10: /* sel */
7866 tmp2
= load_reg(s
, rm
);
7867 tmp3
= tcg_temp_new_i32();
7868 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7869 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7870 tcg_temp_free_i32(tmp3
);
7871 tcg_temp_free_i32(tmp2
);
7873 case 0x18: /* clz */
7874 gen_helper_clz(tmp
, tmp
);
7880 store_reg(s
, rd
, tmp
);
7882 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7883 op
= (insn
>> 4) & 0xf;
7884 tmp
= load_reg(s
, rn
);
7885 tmp2
= load_reg(s
, rm
);
7886 switch ((insn
>> 20) & 7) {
7887 case 0: /* 32 x 32 -> 32 */
7888 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7889 tcg_temp_free_i32(tmp2
);
7891 tmp2
= load_reg(s
, rs
);
7893 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7895 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7896 tcg_temp_free_i32(tmp2
);
7899 case 1: /* 16 x 16 -> 32 */
7900 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7901 tcg_temp_free_i32(tmp2
);
7903 tmp2
= load_reg(s
, rs
);
7904 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7905 tcg_temp_free_i32(tmp2
);
7908 case 2: /* Dual multiply add. */
7909 case 4: /* Dual multiply subtract. */
7911 gen_swap_half(tmp2
);
7912 gen_smul_dual(tmp
, tmp2
);
7913 if (insn
& (1 << 22)) {
7914 /* This subtraction cannot overflow. */
7915 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7917 /* This addition cannot overflow 32 bits;
7918 * however it may overflow considered as a signed
7919 * operation, in which case we must set the Q flag.
7921 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7923 tcg_temp_free_i32(tmp2
);
7926 tmp2
= load_reg(s
, rs
);
7927 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7928 tcg_temp_free_i32(tmp2
);
7931 case 3: /* 32 * 16 -> 32msb */
7933 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7936 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7937 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7938 tmp
= tcg_temp_new_i32();
7939 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7940 tcg_temp_free_i64(tmp64
);
7943 tmp2
= load_reg(s
, rs
);
7944 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7945 tcg_temp_free_i32(tmp2
);
7948 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7949 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7951 tmp
= load_reg(s
, rs
);
7952 if (insn
& (1 << 20)) {
7953 tmp64
= gen_addq_msw(tmp64
, tmp
);
7955 tmp64
= gen_subq_msw(tmp64
, tmp
);
7958 if (insn
& (1 << 4)) {
7959 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7961 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7962 tmp
= tcg_temp_new_i32();
7963 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7964 tcg_temp_free_i64(tmp64
);
7966 case 7: /* Unsigned sum of absolute differences. */
7967 gen_helper_usad8(tmp
, tmp
, tmp2
);
7968 tcg_temp_free_i32(tmp2
);
7970 tmp2
= load_reg(s
, rs
);
7971 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7972 tcg_temp_free_i32(tmp2
);
7976 store_reg(s
, rd
, tmp
);
7978 case 6: case 7: /* 64-bit multiply, Divide. */
7979 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7980 tmp
= load_reg(s
, rn
);
7981 tmp2
= load_reg(s
, rm
);
7982 if ((op
& 0x50) == 0x10) {
7984 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7987 gen_helper_udiv(tmp
, tmp
, tmp2
);
7989 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7990 tcg_temp_free_i32(tmp2
);
7991 store_reg(s
, rd
, tmp
);
7992 } else if ((op
& 0xe) == 0xc) {
7993 /* Dual multiply accumulate long. */
7995 gen_swap_half(tmp2
);
7996 gen_smul_dual(tmp
, tmp2
);
7998 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8000 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8002 tcg_temp_free_i32(tmp2
);
8004 tmp64
= tcg_temp_new_i64();
8005 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8006 tcg_temp_free_i32(tmp
);
8007 gen_addq(s
, tmp64
, rs
, rd
);
8008 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8009 tcg_temp_free_i64(tmp64
);
8012 /* Unsigned 64-bit multiply */
8013 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8017 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8018 tcg_temp_free_i32(tmp2
);
8019 tmp64
= tcg_temp_new_i64();
8020 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8021 tcg_temp_free_i32(tmp
);
8023 /* Signed 64-bit multiply */
8024 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8029 gen_addq_lo(s
, tmp64
, rs
);
8030 gen_addq_lo(s
, tmp64
, rd
);
8031 } else if (op
& 0x40) {
8032 /* 64-bit accumulate. */
8033 gen_addq(s
, tmp64
, rs
, rd
);
8035 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8036 tcg_temp_free_i64(tmp64
);
8041 case 6: case 7: case 14: case 15:
8043 if (((insn
>> 24) & 3) == 3) {
8044 /* Translate into the equivalent ARM encoding. */
8045 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
8046 if (disas_neon_data_insn(env
, s
, insn
))
8049 if (insn
& (1 << 28))
8051 if (disas_coproc_insn (env
, s
, insn
))
8055 case 8: case 9: case 10: case 11:
8056 if (insn
& (1 << 15)) {
8057 /* Branches, misc control. */
8058 if (insn
& 0x5000) {
8059 /* Unconditional branch. */
8060 /* signextend(hw1[10:0]) -> offset[:12]. */
8061 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
8062 /* hw1[10:0] -> offset[11:1]. */
8063 offset
|= (insn
& 0x7ff) << 1;
8064 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8065 offset[24:22] already have the same value because of the
8066 sign extension above. */
8067 offset
^= ((~insn
) & (1 << 13)) << 10;
8068 offset
^= ((~insn
) & (1 << 11)) << 11;
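/* hw2 bits 13 and 11 are the J1/J2 bits of the 32-bit branch encoding; the
   two XORs above compute offset bits 23 and 22 as NOT(J ^ S), matching the
   I1/I2 terms in the architectural offset expansion. */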
8070 if (insn
& (1 << 14)) {
8071 /* Branch and link. */
8072 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
8076 if (insn
& (1 << 12)) {
8081 offset
&= ~(uint32_t)2;
8082 gen_bx_im(s
, offset
);
8084 } else if (((insn
>> 23) & 7) == 7) {
8086 if (insn
& (1 << 13))
8089 if (insn
& (1 << 26)) {
8090 /* Secure monitor call (v6Z) */
8091 goto illegal_op
; /* not implemented. */
8093 op
= (insn
>> 20) & 7;
8095 case 0: /* msr cpsr. */
8097 tmp
= load_reg(s
, rn
);
8098 addr
= tcg_const_i32(insn
& 0xff);
8099 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8100 tcg_temp_free_i32(addr
);
8101 tcg_temp_free_i32(tmp
);
8106 case 1: /* msr spsr. */
8109 tmp
= load_reg(s
, rn
);
8111 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8115 case 2: /* cps, nop-hint. */
8116 if (((insn
>> 8) & 7) == 0) {
8117 gen_nop_hint(s
, insn
& 0xff);
8119 /* Implemented as NOP in user mode. */
8124 if (insn
& (1 << 10)) {
8125 if (insn
& (1 << 7))
8127 if (insn
& (1 << 6))
8129 if (insn
& (1 << 5))
8131 if (insn
& (1 << 9))
8132 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8134 if (insn
& (1 << 8)) {
8136 imm
|= (insn
& 0x1f);
8139 gen_set_psr_im(s
, offset
, 0, imm
);
                case 3: /* Special control operations.  */
                    ARCH(7);
                    op = (insn >> 4) & 0xf;
                    switch (op) {
                    case 2: /* clrex */
                        gen_clrex(s);
                        break;
                    case 4: /* dsb */
                    case 5: /* dmb */
                    case 6: /* isb */
                        /* These execute as NOPs.  */
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 4: /* bxj */
                    /* Trivial implementation equivalent to bx.  */
                    tmp = load_reg(s, rn);
                    gen_bx(s, tmp);
                    break;
                case 5: /* Exception return.  */
                    if (IS_USER(s)) {
                        goto illegal_op;
                    }
                    if (rn != 14 || rd != 15) {
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                    break;
                case 6: /* mrs cpsr.  */
                    tmp = tcg_temp_new_i32();
                    if (IS_M(env)) {
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_mrs(tmp, cpu_env, addr);
                        tcg_temp_free_i32(addr);
                    } else {
                        gen_helper_cpsr_read(tmp);
                    }
                    store_reg(s, rd, tmp);
                    break;
                case 7: /* mrs spsr.  */
                    /* Not accessible in user mode.  */
                    if (IS_USER(s) || IS_M(env))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                    break;
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
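            /* With hw2 bit 15 clear this space is data processing with an
             * immediate operand: either a plain binary immediate
             * (ADDW/SUBW/MOVW/MOVT, bitfield and saturate ops) or the
             * replicated/rotated "modified immediate" form decoded further
             * down.
             */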
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
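                /* Example of the T32 "modified immediate" forms decoded
                 * below (a sketch, assuming the usual i:imm3:imm8 layout):
                 *   i:imm3 == 0b0001, imm8 == 0xAB  ->  0x00AB00AB  (case 1)
                 *   i:imm3 == 0b0011, imm8 == 0xAB  ->  0xABABABAB  (case 3)
                 *   i:imm3 >= 0b0100: a rotated constant, where bit 7 of
                 *   imm8 is forced to 1 and the rotation amount is taken
                 *   from i:imm3:imm8<7>.
                 */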
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
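    /* Load/store single data item: op packs the access size (bits 22:21)
     * with the sign-extend flag (bit 24).  Byte/halfword loads with
     * rs == 15 are really memory hints (PLD/PLDW/PLI) and are filtered out
     * before any addressing code is emitted.
     */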
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE or unallocated hint */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD* or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD* or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
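/* Translate a 16-bit Thumb instruction.  Advances s->pc and emits the
   corresponding TCG ops; 32-bit Thumb-2 encodings are forwarded to
   disas_thumb2_insn() from the case 14/15 handlers below.  */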
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL.  */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
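        /* Format 4 ALU operations.  The shift/rotate forms take their
         * operands the other way round, and flag updates are suppressed
         * inside an IT block (s->condexec_mask non-zero), hence the parallel
         * helper_*_cc / plain-op paths below.
         */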
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }

            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
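        /* The IT state written above is not consumed here: it is advanced
         * one instruction at a time by the condexec handling in
         * gen_intermediate_code_internal().
         */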
        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        break;
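    /* Conditional branch and SVC space: cond == 0xe is the permanently
     * UNDEFINED encoding and cond == 0xf is SWI/SVC; anything else is a
     * short conditional branch with an 8-bit signed offset.
     */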
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
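    /* The packed format used for that mapping is (cond << 4) | (mask >> 1),
       i.e. exactly what is written into gen_opc_condexec_bits[] below and
       what gen_pc_load() expects when restoring env->condexec_bits.  */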
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
= gen_opc_ptr
- gen_opc_buf
;
9319 gen_opc_instr_start
[lj
++] = 0;
9321 gen_opc_pc
[lj
] = dc
->pc
;
9322 gen_opc_condexec_bits
[lj
] = (dc
->condexec_cond
<< 4) | (dc
->condexec_mask
>> 1);
9323 gen_opc_instr_start
[lj
] = 1;
9324 gen_opc_icount
[lj
] = num_insns
;
9327 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
9330 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
9331 tcg_gen_debug_insn_start(dc
->pc
);
9335 disas_thumb_insn(env
, dc
);
9336 if (dc
->condexec_mask
) {
9337 dc
->condexec_cond
= (dc
->condexec_cond
& 0xe)
9338 | ((dc
->condexec_mask
>> 4) & 1);
9339 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
9340 if (dc
->condexec_mask
== 0) {
9341 dc
->condexec_cond
= 0;
9345 disas_arm_insn(env
, dc
);
9348 if (dc
->condjmp
&& !dc
->is_jmp
) {
9349 gen_set_label(dc
->condlabel
);
        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
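/* Debug helpers for register-dump logging: cpu_dump_state() prints the
   integer registers and a decoded PSR (NZCV flags, Thumb bit and current
   mode name).  */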
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
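/* gen_pc_load() maps a position in the generated op stream back to guest
   state: it restores the PC and the IT/condexec bits recorded during a
   search_pc re-translation (see the condexec note above).  */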
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}