4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
45 /* internal defines */
46 typedef struct DisasContext {
49 /* Nonzero if this instruction has been conditionally skipped. */
51 /* The label that will be jumped to when the instruction is skipped. */
53 /* Thumb-2 conditional execution bits. */
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
59 #if !defined(CONFIG_USER_ONLY)
64 #if defined(CONFIG_USER_ONLY)
67 #define IS_USER(s) (s->user)
70 /* These instructions trap after executing, so defer them until after the
71    conditional execution state has been updated. */
75 static TCGv_ptr cpu_env;
76 /* We reuse the same 64-bit temporaries for efficiency. */
77 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
78 static TCGv_i32 cpu_R[16];
79 static TCGv_i32 cpu_exclusive_addr;
80 static TCGv_i32 cpu_exclusive_val;
81 static TCGv_i32 cpu_exclusive_high;
82 #ifdef CONFIG_USER_ONLY
83 static TCGv_i32 cpu_exclusive_test;
84 static TCGv_i32 cpu_exclusive_info;
87 /* FIXME: These should be removed. */
88 static TCGv cpu_F0s, cpu_F1s;
89 static TCGv_i64 cpu_F0d, cpu_F1d;
91 #include "gen-icount.h"
93 static const char *regnames[] =
94 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
95 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
97 /* initialize TCG globals. */
98 void arm_translate_init(void)
102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104 for (i = 0; i < 16; i++) {
105 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
106                                   offsetof(CPUState, regs[i]),
109 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
110     offsetof(CPUState, exclusive_addr), "exclusive_addr");
111 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
112     offsetof(CPUState, exclusive_val), "exclusive_val");
113 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
114     offsetof(CPUState, exclusive_high), "exclusive_high");
115 #ifdef CONFIG_USER_ONLY
116 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
117     offsetof(CPUState, exclusive_test), "exclusive_test");
118 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
119     offsetof(CPUState, exclusive_info), "exclusive_info");
126 static int num_temps;
128 /* Allocate a temporary variable. */
129 static TCGv_i32 new_tmp(void)
132 return tcg_temp_new_i32();
135 /* Release a temporary variable. */
136 static void dead_tmp(TCGv tmp)
142 static inline TCGv load_cpu_offset(int offset)
144 TCGv tmp = new_tmp();
145 tcg_gen_ld_i32(tmp, cpu_env, offset);
149 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
151 static inline void store_cpu_offset(TCGv var, int offset)
153 tcg_gen_st_i32(var, cpu_env, offset);
157 #define store_cpu_field(var, name) \
158 store_cpu_offset(var, offsetof(CPUState, name))
160 /* Set a variable to the value of a CPU register. */
161 static void load_reg_var(DisasContext *s, TCGv var, int reg)
165 /* normally, since we updated PC, we need only add one insn */
167 addr = (long)s->pc + 2;
169 addr = (long)s->pc + 4;
170 tcg_gen_movi_i32(var, addr);
172 tcg_gen_mov_i32(var, cpu_R[reg]);
176 /* Create a new temporary and set it to the value of a CPU register. */
177 static inline TCGv load_reg(DisasContext *s, int reg)
179 TCGv tmp = new_tmp();
180 load_reg_var(s, tmp, reg);
184 /* Set a CPU register.  The source must be a temporary and will be marked as dead. */
186 static void store_reg(DisasContext *s, int reg, TCGv var)
189 tcg_gen_andi_i32(var, var, ~1);
190 s->is_jmp = DISAS_JUMP;
192 tcg_gen_mov_i32(cpu_R[reg], var);
196 /* Value extensions. */
197 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
198 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
199 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
200 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
202 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
203 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
206 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
208 TCGv tmp_mask = tcg_const_i32(mask);
209 gen_helper_cpsr_write(var, tmp_mask);
210 tcg_temp_free_i32(tmp_mask);
212 /* Set NZCV flags from the high 4 bits of var. */
213 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
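/* Editor's note (illustrative, not part of the original source): in the ARM
 * CPSR the condition flags occupy the top nibble -- N is bit 31, Z bit 30,
 * C bit 29, V bit 28 -- so CPSR_NZCV is presumably the mask 0xf0000000 and
 * gen_set_nzcv() updates only those four flag bits from the top of var,
 * e.g. var = 0x80000000 sets N and clears Z, C and V.
 */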
215 static void gen_exception(int excp)
217 TCGv tmp = new_tmp();
218 tcg_gen_movi_i32(tmp, excp);
219 gen_helper_exception(tmp);
223 static void gen_smul_dual(TCGv a, TCGv b)
225 TCGv tmp1 = new_tmp();
226 TCGv tmp2 = new_tmp();
227 tcg_gen_ext16s_i32(tmp1, a);
228 tcg_gen_ext16s_i32(tmp2, b);
229 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
231 tcg_gen_sari_i32(a, a, 16);
232 tcg_gen_sari_i32(b, b, 16);
233 tcg_gen_mul_i32(b, b, a);
234 tcg_gen_mov_i32(a, tmp1);
238 /* Byteswap each halfword. */
239 static void gen_rev16(TCGv var)
241 TCGv tmp = new_tmp();
242 tcg_gen_shri_i32(tmp, var, 8);
243 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
244 tcg_gen_shli_i32(var, var, 8);
245 tcg_gen_andi_i32(var, var, 0xff00ff00);
246 tcg_gen_or_i32(var, var, tmp);
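/* Illustrative sketch (editor's addition, a plain C reference model of the
 * TCG sequence above; it is never compiled in): gen_rev16 swaps the two
 * bytes within each halfword, e.g. 0xAABBCCDD -> 0xBBAADDCC.
 */
#if 0   /* reference model only */
static uint32_t ref_rev16(uint32_t x)
{
    return ((x >> 8) & 0x00ff00ff) | ((x << 8) & 0xff00ff00);
}
#endif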
250 /* Byteswap low halfword and sign extend. */
251 static void gen_revsh(TCGv var)
253 TCGv tmp = new_tmp();
254 tcg_gen_shri_i32(tmp, var, 8);
255 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
256 tcg_gen_shli_i32(var, var, 8);
257 tcg_gen_ext8s_i32(var, var);
258 tcg_gen_or_i32(var, var, tmp);
262 /* Unsigned bitfield extract. */
263 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
266 tcg_gen_shri_i32(var, var, shift);
267 tcg_gen_andi_i32(var, var, mask);
270 /* Signed bitfield extract. */
271 static void gen_sbfx(TCGv var, int shift, int width)
276 tcg_gen_sari_i32(var, var, shift);
277 if (shift + width < 32) {
278 signbit = 1u << (width - 1);
279 tcg_gen_andi_i32(var, var, (1u << width) - 1);
280 tcg_gen_xori_i32(var, var, signbit);
281 tcg_gen_subi_i32(var, var, signbit);
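/* Editor's note (illustrative): the xor/sub pair above is the standard trick
 * for sign-extending an unsigned "width"-bit field: (x ^ s) - s with
 * s = 1 << (width - 1).  Worked example for width = 4 and field 0b1011
 * (-5 as a signed 4-bit value): 0b1011 ^ 0b1000 = 3, and 3 - 8 = -5.
 * A positive field such as 0b0011 is unchanged: 3 ^ 8 = 11, 11 - 8 = 3.
 */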
285 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
286 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
288 tcg_gen_andi_i32(val, val, mask);
289 tcg_gen_shli_i32(val, val, shift);
290 tcg_gen_andi_i32(base, base, ~(mask << shift));
291 tcg_gen_or_i32(dest, base, val);
294 /* Round the top 32 bits of a 64-bit value. */
295 static void gen_roundqd(TCGv a, TCGv b)
297 tcg_gen_shri_i32(a, a, 31);
298 tcg_gen_add_i32(a, a, b);
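/* Editor's note (illustrative, hedged): judging from how 64-bit values are
 * split into 32-bit halves elsewhere in this file, a appears to hold the low
 * word and b the high word; adding bit 31 of the low word to the high word
 * rounds the 64-bit value to the nearest multiple of 2^32, e.g. high = 5
 * with low >= 0x80000000 rounds up to 6.
 */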
301 /* FIXME: Most targets have native widening multiplication.
302 It would be good to use that instead of a full wide multiply. */
303 /* 32x32->64 multiply. Marks inputs as dead. */
304 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
306 TCGv_i64 tmp1 = tcg_temp_new_i64();
307 TCGv_i64 tmp2 = tcg_temp_new_i64();
309 tcg_gen_extu_i32_i64(tmp1, a);
311 tcg_gen_extu_i32_i64(tmp2, b);
313 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
314 tcg_temp_free_i64(tmp2);
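/* Illustrative sketch (editor's addition, plain C reference of the widening
 * multiply built above from two zero-extensions and a 64-bit multiply).
 */
#if 0   /* reference model only */
static uint64_t ref_mulu_i64_i32(uint32_t a, uint32_t b)
{
    return (uint64_t)a * (uint64_t)b;   /* unsigned 32x32 -> 64 */
}
#endif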
318 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
320 TCGv_i64 tmp1 = tcg_temp_new_i64();
321 TCGv_i64 tmp2 = tcg_temp_new_i64();
323 tcg_gen_ext_i32_i64(tmp1, a);
325 tcg_gen_ext_i32_i64(tmp2, b);
327 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
328 tcg_temp_free_i64(tmp2);
332 /* Signed 32x32->64 multiply. */
333 static void gen_imull(TCGv a, TCGv b)
335 TCGv_i64 tmp1 = tcg_temp_new_i64();
336 TCGv_i64 tmp2 = tcg_temp_new_i64();
338 tcg_gen_ext_i32_i64(tmp1, a);
339 tcg_gen_ext_i32_i64(tmp2, b);
340 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
341 tcg_temp_free_i64(tmp2);
342 tcg_gen_trunc_i64_i32(a, tmp1);
343 tcg_gen_shri_i64(tmp1, tmp1, 32);
344 tcg_gen_trunc_i64_i32(b, tmp1);
345 tcg_temp_free_i64(tmp1);
348 /* Swap low and high halfwords. */
349 static void gen_swap_half(TCGv var)
351 TCGv tmp = new_tmp();
352 tcg_gen_shri_i32(tmp, var, 16);
353 tcg_gen_shli_i32(var, var, 16);
354 tcg_gen_or_i32(var, var, tmp);
358 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
359 tmp = (t0 ^ t1) & 0x8000;
362 t0 = (t0 + t1) ^ tmp;
365 static void gen_add16(TCGv t0, TCGv t1)
367 TCGv tmp = new_tmp();
368 tcg_gen_xor_i32(tmp, t0, t1);
369 tcg_gen_andi_i32(tmp, tmp, 0x8000);
370 tcg_gen_andi_i32(t0, t0, ~0x8000);
371 tcg_gen_andi_i32(t1, t1, ~0x8000);
372 tcg_gen_add_i32(t0, t0, t1);
373 tcg_gen_xor_i32(t0, t0, tmp);
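/* Editor's note (illustrative): the sequence above performs two independent
 * 16-bit additions with a single 32-bit add.  tmp records whether the two
 * bit-15 values differ, bit 15 is cleared in both operands so no carry can
 * spill into the upper halfword, the operands are added, and tmp is xored
 * back in to restore the correct bit 15 of the low result.  Example:
 * t0 = t1 = 0x00018000 gives the per-lane result 0x00020000, not the
 * 0x00030000 a plain 32-bit add would produce.
 */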
378 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
380 /* Set CF to the top bit of var. */
381 static void gen_set_CF_bit31(TCGv var)
383 TCGv tmp = new_tmp();
384 tcg_gen_shri_i32(tmp, var, 31);
389 /* Set N and Z flags from var. */
390 static inline void gen_logic_CC(TCGv var)
392 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
393 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
397 static void gen_adc(TCGv t0, TCGv t1)
400 tcg_gen_add_i32(t0, t0, t1);
401 tmp = load_cpu_field(CF);
402 tcg_gen_add_i32(t0, t0, tmp);
406 /* dest = T0 + T1 + CF. */
407 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
410 tcg_gen_add_i32(dest, t0, t1);
411 tmp = load_cpu_field(CF);
412 tcg_gen_add_i32(dest, dest, tmp);
416 /* dest = T0 - T1 + CF - 1. */
417 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
420 tcg_gen_sub_i32(dest, t0, t1);
421 tmp = load_cpu_field(CF);
422 tcg_gen_add_i32(dest, dest, tmp);
423 tcg_gen_subi_i32(dest, dest, 1);
427 /* FIXME: Implement this natively. */
428 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
430 static void shifter_out_im(TCGv var, int shift)
432 TCGv tmp = new_tmp();
434 tcg_gen_andi_i32(tmp, var, 1);
436 tcg_gen_shri_i32(tmp, var, shift);
438 tcg_gen_andi_i32(tmp, tmp, 1);
444 /* Shift by immediate. Includes special handling for shift == 0. */
445 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
451 shifter_out_im(var, 32 - shift);
452 tcg_gen_shli_i32(var, var, shift);
458 tcg_gen_shri_i32(var, var, 31);
461 tcg_gen_movi_i32(var, 0);
464 shifter_out_im(var, shift - 1);
465 tcg_gen_shri_i32(var, var, shift);
472 shifter_out_im(var, shift - 1);
475 tcg_gen_sari_i32(var, var, shift);
477 case 3: /* ROR/RRX */
480 shifter_out_im(var, shift - 1);
481 tcg_gen_rotri_i32(var, var, shift); break;
483 TCGv tmp = load_cpu_field(CF);
485 shifter_out_im(var, 0);
486 tcg_gen_shri_i32(var, var, 1);
487 tcg_gen_shli_i32(tmp, tmp, 31);
488 tcg_gen_or_i32(var, var, tmp);
494 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
495                                      TCGv shift, int flags)
499 case 0: gen_helper_shl_cc(var, var, shift); break;
500 case 1: gen_helper_shr_cc(var, var, shift); break;
501 case 2: gen_helper_sar_cc(var, var, shift); break;
502 case 3: gen_helper_ror_cc(var, var, shift); break;
506 case 0: gen_helper_shl(var, var, shift); break;
507 case 1: gen_helper_shr(var, var, shift); break;
508 case 2: gen_helper_sar(var, var, shift); break;
509 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
510 tcg_gen_rotr_i32(var, var, shift); break;
516 #define PAS_OP(pfx) \
518 case 0: gen_pas_helper(glue(pfx,add16)); break; \
519 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
520 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
521 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
522 case 4: gen_pas_helper(glue(pfx,add8)); break; \
523 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
525 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
530 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
532 tmp
= tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
535 tcg_temp_free_ptr(tmp
);
538 tmp
= tcg_temp_new_ptr();
539 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
541 tcg_temp_free_ptr(tmp
);
543 #undef gen_pas_helper
544 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
557 #undef gen_pas_helper
562 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
563 #define PAS_OP(pfx) \
565 case 0: gen_pas_helper(glue(pfx,add8)); break; \
566 case 1: gen_pas_helper(glue(pfx,add16)); break; \
567 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
568 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
569 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
570 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
572 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
577 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
579 tmp
= tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
582 tcg_temp_free_ptr(tmp
);
585 tmp
= tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
588 tcg_temp_free_ptr(tmp
);
590 #undef gen_pas_helper
591 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
604 #undef gen_pas_helper
609 static void gen_test_cc(int cc
, int label
)
617 tmp
= load_cpu_field(ZF
);
618 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
621 tmp
= load_cpu_field(ZF
);
622 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
625 tmp
= load_cpu_field(CF
);
626 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
629 tmp
= load_cpu_field(CF
);
630 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
633 tmp
= load_cpu_field(NF
);
634 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
637 tmp
= load_cpu_field(NF
);
638 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
641 tmp
= load_cpu_field(VF
);
642 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
645 tmp
= load_cpu_field(VF
);
646 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
648 case 8: /* hi: C && !Z */
649 inv
= gen_new_label();
650 tmp
= load_cpu_field(CF
);
651 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
653 tmp
= load_cpu_field(ZF
);
654 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
657 case 9: /* ls: !C || Z */
658 tmp
= load_cpu_field(CF
);
659 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
661 tmp
= load_cpu_field(ZF
);
662 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
664 case 10: /* ge: N == V -> N ^ V == 0 */
665 tmp
= load_cpu_field(VF
);
666 tmp2
= load_cpu_field(NF
);
667 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
669 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
671 case 11: /* lt: N != V -> N ^ V != 0 */
672 tmp
= load_cpu_field(VF
);
673 tmp2
= load_cpu_field(NF
);
674 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
676 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
678 case 12: /* gt: !Z && N == V */
679 inv
= gen_new_label();
680 tmp
= load_cpu_field(ZF
);
681 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
683 tmp
= load_cpu_field(VF
);
684 tmp2
= load_cpu_field(NF
);
685 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
687 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
690 case 13: /* le: Z || N != V */
691 tmp
= load_cpu_field(ZF
);
692 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
694 tmp
= load_cpu_field(VF
);
695 tmp2
= load_cpu_field(NF
);
696 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
698 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
701 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
707 static const uint8_t table_logic_cc
[16] = {
726 /* Set PC and Thumb state from an immediate address. */
727 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
731 s
->is_jmp
= DISAS_UPDATE
;
732 if (s
->thumb
!= (addr
& 1)) {
734 tcg_gen_movi_i32(tmp
, addr
& 1);
735 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, thumb
));
738 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
741 /* Set PC and Thumb state from var. var is marked as dead. */
742 static inline void gen_bx(DisasContext
*s
, TCGv var
)
744 s
->is_jmp
= DISAS_UPDATE
;
745 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
746 tcg_gen_andi_i32(var
, var
, 1);
747 store_cpu_field(var
, thumb
);
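/* Editor's note (illustrative): on ARM, bit 0 of a branch-exchange target
 * selects the instruction set (1 = Thumb, 0 = ARM), which is why gen_bx
 * writes var & ~1 to the PC and var & 1 to the thumb flag; a branch to
 * 0x8001, for example, continues at 0x8000 in Thumb state.
 */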
750 /* Variant of store_reg which uses branch&exchange logic when storing
751 to r15 in ARM architecture v7 and above. The source must be a temporary
752 and will be marked as dead. */
753 static inline void store_reg_bx(CPUState
*env
, DisasContext
*s
,
756 if (reg
== 15 && ENABLE_ARCH_7
) {
759 store_reg(s
, reg
, var
);
763 static inline TCGv
gen_ld8s(TCGv addr
, int index
)
765 TCGv tmp
= new_tmp();
766 tcg_gen_qemu_ld8s(tmp
, addr
, index
);
769 static inline TCGv
gen_ld8u(TCGv addr
, int index
)
771 TCGv tmp
= new_tmp();
772 tcg_gen_qemu_ld8u(tmp
, addr
, index
);
775 static inline TCGv
gen_ld16s(TCGv addr
, int index
)
777 TCGv tmp
= new_tmp();
778 tcg_gen_qemu_ld16s(tmp
, addr
, index
);
781 static inline TCGv
gen_ld16u(TCGv addr
, int index
)
783 TCGv tmp
= new_tmp();
784 tcg_gen_qemu_ld16u(tmp
, addr
, index
);
787 static inline TCGv
gen_ld32(TCGv addr
, int index
)
789 TCGv tmp
= new_tmp();
790 tcg_gen_qemu_ld32u(tmp
, addr
, index
);
793 static inline TCGv_i64
gen_ld64(TCGv addr
, int index
)
795 TCGv_i64 tmp
= tcg_temp_new_i64();
796 tcg_gen_qemu_ld64(tmp
, addr
, index
);
799 static inline void gen_st8(TCGv val
, TCGv addr
, int index
)
801 tcg_gen_qemu_st8(val
, addr
, index
);
804 static inline void gen_st16(TCGv val
, TCGv addr
, int index
)
806 tcg_gen_qemu_st16(val
, addr
, index
);
809 static inline void gen_st32(TCGv val
, TCGv addr
, int index
)
811 tcg_gen_qemu_st32(val
, addr
, index
);
814 static inline void gen_st64(TCGv_i64 val
, TCGv addr
, int index
)
816 tcg_gen_qemu_st64(val
, addr
, index
);
817 tcg_temp_free_i64(val
);
820 static inline void gen_set_pc_im(uint32_t val
)
822 tcg_gen_movi_i32(cpu_R
[15], val
);
825 /* Force a TB lookup after an instruction that changes the CPU state. */
826 static inline void gen_lookup_tb(DisasContext
*s
)
828 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
829 s
->is_jmp
= DISAS_UPDATE
;
832 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
835 int val
, rm
, shift
, shiftop
;
838 if (!(insn
& (1 << 25))) {
841 if (!(insn
& (1 << 23)))
844 tcg_gen_addi_i32(var
, var
, val
);
848 shift
= (insn
>> 7) & 0x1f;
849 shiftop
= (insn
>> 5) & 3;
850 offset
= load_reg(s
, rm
);
851 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
852 if (!(insn
& (1 << 23)))
853 tcg_gen_sub_i32(var
, var
, offset
);
855 tcg_gen_add_i32(var
, var
, offset
);
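/* Editor's note (illustrative, hedged): the bit tests above follow the ARM
 * "addressing mode 2" layout -- bit 25 selects immediate (0) versus
 * shifted-register (1) offset, bit 23 selects add versus subtract, bits
 * [11:7] give the shift amount, bits [6:5] the shift type, and bits [3:0]
 * the offset register Rm.
 */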
860 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
866 if (insn
& (1 << 22)) {
868 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
869 if (!(insn
& (1 << 23)))
873 tcg_gen_addi_i32(var
, var
, val
);
877 tcg_gen_addi_i32(var
, var
, extra
);
879 offset
= load_reg(s
, rm
);
880 if (!(insn
& (1 << 23)))
881 tcg_gen_sub_i32(var
, var
, offset
);
883 tcg_gen_add_i32(var
, var
, offset
);
888 #define VFP_OP2(name) \
889 static inline void gen_vfp_##name(int dp) \
892 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
894 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
904 static inline void gen_vfp_abs(int dp
)
907 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
909 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
912 static inline void gen_vfp_neg(int dp
)
915 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
917 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
920 static inline void gen_vfp_sqrt(int dp
)
923 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
925 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
928 static inline void gen_vfp_cmp(int dp
)
931 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
933 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
936 static inline void gen_vfp_cmpe(int dp
)
939 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
941 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
944 static inline void gen_vfp_F1_ld0(int dp
)
947 tcg_gen_movi_i64(cpu_F1d
, 0);
949 tcg_gen_movi_i32(cpu_F1s
, 0);
952 static inline void gen_vfp_uito(int dp
)
955 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
957 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
960 static inline void gen_vfp_sito(int dp
)
963 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
965 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
968 static inline void gen_vfp_toui(int dp
)
971 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
973 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
976 static inline void gen_vfp_touiz(int dp
)
979 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
981 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
984 static inline void gen_vfp_tosi(int dp
)
987 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
989 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
992 static inline void gen_vfp_tosiz(int dp
)
995 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
997 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1000 #define VFP_GEN_FIX(name) \
1001 static inline void gen_vfp_##name(int dp, int shift) \
1003 TCGv tmp_shift = tcg_const_i32(shift); \
1005 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1007 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1008 tcg_temp_free_i32(tmp_shift); \
1020 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv addr
)
1023 tcg_gen_qemu_ld64(cpu_F0d
, addr
, IS_USER(s
));
1025 tcg_gen_qemu_ld32u(cpu_F0s
, addr
, IS_USER(s
));
1028 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv addr
)
1031 tcg_gen_qemu_st64(cpu_F0d
, addr
, IS_USER(s
));
1033 tcg_gen_qemu_st32(cpu_F0s
, addr
, IS_USER(s
));
1037 vfp_reg_offset (int dp
, int reg
)
1040 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1042 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1043 + offsetof(CPU_DoubleU
, l
.upper
);
1045 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1046 + offsetof(CPU_DoubleU
, l
.lower
);
1050 /* Return the offset of a 32-bit piece of a NEON register.
1051 zero is the least significant end of the register. */
1053 neon_reg_offset (int reg
, int n
)
1057 return vfp_reg_offset(0, sreg
);
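/* Editor's note (illustrative): each 64-bit NEON/VFP D register overlays two
 * 32-bit single-precision registers, so 32-bit piece n of D register "reg"
 * presumably maps to single-precision register 2 * reg + n; for example the
 * upper half of d1 is s3.
 */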
1060 static TCGv
neon_load_reg(int reg
, int pass
)
1062 TCGv tmp
= new_tmp();
1063 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1067 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1069 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1073 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1075 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1078 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1080 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1083 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1084 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1085 #define tcg_gen_st_f32 tcg_gen_st_i32
1086 #define tcg_gen_st_f64 tcg_gen_st_i64
1088 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1091 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1093 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1096 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1099 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1101 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1104 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1107 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1109 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1112 #define ARM_CP_RW_BIT (1 << 20)
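/* Editor's note (illustrative): bit 20 of a coprocessor instruction is the
 * L (direction) bit, so "insn & ARM_CP_RW_BIT" being non-zero means a
 * transfer from the coprocessor to ARM registers (MRC/MRRC style) and zero
 * means a transfer to the coprocessor (MCR/MCRR style), matching the
 * TMRRC/TMCRR and load/store cases below.
 */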
1114 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1116 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1119 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1121 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1124 static inline TCGv
iwmmxt_load_creg(int reg
)
1126 TCGv var
= new_tmp();
1127 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1131 static inline void iwmmxt_store_creg(int reg
, TCGv var
)
1133 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1136 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1138 iwmmxt_store_reg(cpu_M0
, rn
);
1141 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1143 iwmmxt_load_reg(cpu_M0
, rn
);
1146 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1148 iwmmxt_load_reg(cpu_V1
, rn
);
1149 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1152 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1154 iwmmxt_load_reg(cpu_V1
, rn
);
1155 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1158 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1160 iwmmxt_load_reg(cpu_V1
, rn
);
1161 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1164 #define IWMMXT_OP(name) \
1165 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1167 iwmmxt_load_reg(cpu_V1, rn); \
1168 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1171 #define IWMMXT_OP_ENV(name) \
1172 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1174 iwmmxt_load_reg(cpu_V1, rn); \
1175 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1178 #define IWMMXT_OP_ENV_SIZE(name) \
1179 IWMMXT_OP_ENV(name##b) \
1180 IWMMXT_OP_ENV(name##w) \
1181 IWMMXT_OP_ENV(name##l)
1183 #define IWMMXT_OP_ENV1(name) \
1184 static inline void gen_op_iwmmxt_##name##_M0(void) \
1186 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1200 IWMMXT_OP_ENV_SIZE(unpackl
)
1201 IWMMXT_OP_ENV_SIZE(unpackh
)
1203 IWMMXT_OP_ENV1(unpacklub
)
1204 IWMMXT_OP_ENV1(unpackluw
)
1205 IWMMXT_OP_ENV1(unpacklul
)
1206 IWMMXT_OP_ENV1(unpackhub
)
1207 IWMMXT_OP_ENV1(unpackhuw
)
1208 IWMMXT_OP_ENV1(unpackhul
)
1209 IWMMXT_OP_ENV1(unpacklsb
)
1210 IWMMXT_OP_ENV1(unpacklsw
)
1211 IWMMXT_OP_ENV1(unpacklsl
)
1212 IWMMXT_OP_ENV1(unpackhsb
)
1213 IWMMXT_OP_ENV1(unpackhsw
)
1214 IWMMXT_OP_ENV1(unpackhsl
)
1216 IWMMXT_OP_ENV_SIZE(cmpeq
)
1217 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1218 IWMMXT_OP_ENV_SIZE(cmpgts
)
1220 IWMMXT_OP_ENV_SIZE(mins
)
1221 IWMMXT_OP_ENV_SIZE(minu
)
1222 IWMMXT_OP_ENV_SIZE(maxs
)
1223 IWMMXT_OP_ENV_SIZE(maxu
)
1225 IWMMXT_OP_ENV_SIZE(subn
)
1226 IWMMXT_OP_ENV_SIZE(addn
)
1227 IWMMXT_OP_ENV_SIZE(subu
)
1228 IWMMXT_OP_ENV_SIZE(addu
)
1229 IWMMXT_OP_ENV_SIZE(subs
)
1230 IWMMXT_OP_ENV_SIZE(adds
)
1232 IWMMXT_OP_ENV(avgb0
)
1233 IWMMXT_OP_ENV(avgb1
)
1234 IWMMXT_OP_ENV(avgw0
)
1235 IWMMXT_OP_ENV(avgw1
)
1239 IWMMXT_OP_ENV(packuw
)
1240 IWMMXT_OP_ENV(packul
)
1241 IWMMXT_OP_ENV(packuq
)
1242 IWMMXT_OP_ENV(packsw
)
1243 IWMMXT_OP_ENV(packsl
)
1244 IWMMXT_OP_ENV(packsq
)
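/* Editor's note (illustrative): each IWMMXT_OP_ENV(x) line above expands to
 * a small wrapper, roughly:
 *
 *     static inline void gen_op_iwmmxt_packsq_M0_wRn(int rn)
 *     {
 *         iwmmxt_load_reg(cpu_V1, rn);
 *         gen_helper_iwmmxt_packsq(cpu_M0, cpu_env, cpu_M0, cpu_V1);
 *     }
 *
 * and IWMMXT_OP_ENV_SIZE(x) instantiates the b/w/l variants in one go.
 */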
1246 static void gen_op_iwmmxt_set_mup(void)
1249 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1250 tcg_gen_ori_i32(tmp
, tmp
, 2);
1251 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1254 static void gen_op_iwmmxt_set_cup(void)
1257 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1258 tcg_gen_ori_i32(tmp
, tmp
, 1);
1259 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1262 static void gen_op_iwmmxt_setpsr_nz(void)
1264 TCGv tmp
= new_tmp();
1265 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1266 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1269 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1271 iwmmxt_load_reg(cpu_V1
, rn
);
1272 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1273 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1276 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
, TCGv dest
)
1282 rd
= (insn
>> 16) & 0xf;
1283 tmp
= load_reg(s
, rd
);
1285 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1286 if (insn
& (1 << 24)) {
1288 if (insn
& (1 << 23))
1289 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1291 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1292 tcg_gen_mov_i32(dest
, tmp
);
1293 if (insn
& (1 << 21))
1294 store_reg(s
, rd
, tmp
);
1297 } else if (insn
& (1 << 21)) {
1299 tcg_gen_mov_i32(dest
, tmp
);
1300 if (insn
& (1 << 23))
1301 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1303 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1304 store_reg(s
, rd
, tmp
);
1305 } else if (!(insn
& (1 << 23)))
1310 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv dest
)
1312 int rd
= (insn
>> 0) & 0xf;
1315 if (insn
& (1 << 8)) {
1316 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1319 tmp
= iwmmxt_load_creg(rd
);
1323 iwmmxt_load_reg(cpu_V0
, rd
);
1324 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
1326 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1327 tcg_gen_mov_i32(dest
, tmp
);
1332 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1333    (i.e. an undefined instruction). */
1334 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1337 int rdhi
, rdlo
, rd0
, rd1
, i
;
1339 TCGv tmp
, tmp2
, tmp3
;
1341 if ((insn
& 0x0e000e00) == 0x0c000000) {
1342 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1344 rdlo
= (insn
>> 12) & 0xf;
1345 rdhi
= (insn
>> 16) & 0xf;
1346 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1347 iwmmxt_load_reg(cpu_V0
, wrd
);
1348 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1349 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1350 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1351 } else { /* TMCRR */
1352 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1353 iwmmxt_store_reg(cpu_V0
, wrd
);
1354 gen_op_iwmmxt_set_mup();
1359 wrd
= (insn
>> 12) & 0xf;
1361 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1365 if (insn
& ARM_CP_RW_BIT
) {
1366 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1368 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1369 iwmmxt_store_creg(wrd
, tmp
);
1372 if (insn
& (1 << 8)) {
1373 if (insn
& (1 << 22)) { /* WLDRD */
1374 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1376 } else { /* WLDRW wRd */
1377 tmp
= gen_ld32(addr
, IS_USER(s
));
1380 if (insn
& (1 << 22)) { /* WLDRH */
1381 tmp
= gen_ld16u(addr
, IS_USER(s
));
1382 } else { /* WLDRB */
1383 tmp
= gen_ld8u(addr
, IS_USER(s
));
1387 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1390 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1393 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1394 tmp
= iwmmxt_load_creg(wrd
);
1395 gen_st32(tmp
, addr
, IS_USER(s
));
1397 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1399 if (insn
& (1 << 8)) {
1400 if (insn
& (1 << 22)) { /* WSTRD */
1402 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1403 } else { /* WSTRW wRd */
1404 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1405 gen_st32(tmp
, addr
, IS_USER(s
));
1408 if (insn
& (1 << 22)) { /* WSTRH */
1409 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1410 gen_st16(tmp
, addr
, IS_USER(s
));
1411 } else { /* WSTRB */
1412 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1413 gen_st8(tmp
, addr
, IS_USER(s
));
1421 if ((insn
& 0x0f000000) != 0x0e000000)
1424 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1425 case 0x000: /* WOR */
1426 wrd
= (insn
>> 12) & 0xf;
1427 rd0
= (insn
>> 0) & 0xf;
1428 rd1
= (insn
>> 16) & 0xf;
1429 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1430 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1431 gen_op_iwmmxt_setpsr_nz();
1432 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1433 gen_op_iwmmxt_set_mup();
1434 gen_op_iwmmxt_set_cup();
1436 case 0x011: /* TMCR */
1439 rd
= (insn
>> 12) & 0xf;
1440 wrd
= (insn
>> 16) & 0xf;
1442 case ARM_IWMMXT_wCID
:
1443 case ARM_IWMMXT_wCASF
:
1445 case ARM_IWMMXT_wCon
:
1446 gen_op_iwmmxt_set_cup();
1448 case ARM_IWMMXT_wCSSF
:
1449 tmp
= iwmmxt_load_creg(wrd
);
1450 tmp2
= load_reg(s
, rd
);
1451 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1453 iwmmxt_store_creg(wrd
, tmp
);
1455 case ARM_IWMMXT_wCGR0
:
1456 case ARM_IWMMXT_wCGR1
:
1457 case ARM_IWMMXT_wCGR2
:
1458 case ARM_IWMMXT_wCGR3
:
1459 gen_op_iwmmxt_set_cup();
1460 tmp
= load_reg(s
, rd
);
1461 iwmmxt_store_creg(wrd
, tmp
);
1467 case 0x100: /* WXOR */
1468 wrd
= (insn
>> 12) & 0xf;
1469 rd0
= (insn
>> 0) & 0xf;
1470 rd1
= (insn
>> 16) & 0xf;
1471 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1472 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1473 gen_op_iwmmxt_setpsr_nz();
1474 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1475 gen_op_iwmmxt_set_mup();
1476 gen_op_iwmmxt_set_cup();
1478 case 0x111: /* TMRC */
1481 rd
= (insn
>> 12) & 0xf;
1482 wrd
= (insn
>> 16) & 0xf;
1483 tmp
= iwmmxt_load_creg(wrd
);
1484 store_reg(s
, rd
, tmp
);
1486 case 0x300: /* WANDN */
1487 wrd
= (insn
>> 12) & 0xf;
1488 rd0
= (insn
>> 0) & 0xf;
1489 rd1
= (insn
>> 16) & 0xf;
1490 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1491 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1492 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1493 gen_op_iwmmxt_setpsr_nz();
1494 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1495 gen_op_iwmmxt_set_mup();
1496 gen_op_iwmmxt_set_cup();
1498 case 0x200: /* WAND */
1499 wrd
= (insn
>> 12) & 0xf;
1500 rd0
= (insn
>> 0) & 0xf;
1501 rd1
= (insn
>> 16) & 0xf;
1502 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1503 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1504 gen_op_iwmmxt_setpsr_nz();
1505 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1506 gen_op_iwmmxt_set_mup();
1507 gen_op_iwmmxt_set_cup();
1509 case 0x810: case 0xa10: /* WMADD */
1510 wrd
= (insn
>> 12) & 0xf;
1511 rd0
= (insn
>> 0) & 0xf;
1512 rd1
= (insn
>> 16) & 0xf;
1513 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1514 if (insn
& (1 << 21))
1515 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1517 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1518 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1519 gen_op_iwmmxt_set_mup();
1521 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1522 wrd
= (insn
>> 12) & 0xf;
1523 rd0
= (insn
>> 16) & 0xf;
1524 rd1
= (insn
>> 0) & 0xf;
1525 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1526 switch ((insn
>> 22) & 3) {
1528 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1531 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1534 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1539 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1540 gen_op_iwmmxt_set_mup();
1541 gen_op_iwmmxt_set_cup();
1543 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1544 wrd
= (insn
>> 12) & 0xf;
1545 rd0
= (insn
>> 16) & 0xf;
1546 rd1
= (insn
>> 0) & 0xf;
1547 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1548 switch ((insn
>> 22) & 3) {
1550 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1553 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1556 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1561 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1562 gen_op_iwmmxt_set_mup();
1563 gen_op_iwmmxt_set_cup();
1565 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1566 wrd
= (insn
>> 12) & 0xf;
1567 rd0
= (insn
>> 16) & 0xf;
1568 rd1
= (insn
>> 0) & 0xf;
1569 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1570 if (insn
& (1 << 22))
1571 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1573 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1574 if (!(insn
& (1 << 20)))
1575 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1576 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1577 gen_op_iwmmxt_set_mup();
1579 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1580 wrd
= (insn
>> 12) & 0xf;
1581 rd0
= (insn
>> 16) & 0xf;
1582 rd1
= (insn
>> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1584 if (insn
& (1 << 21)) {
1585 if (insn
& (1 << 20))
1586 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1588 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1590 if (insn
& (1 << 20))
1591 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1593 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1595 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1596 gen_op_iwmmxt_set_mup();
1598 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1599 wrd
= (insn
>> 12) & 0xf;
1600 rd0
= (insn
>> 16) & 0xf;
1601 rd1
= (insn
>> 0) & 0xf;
1602 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1603 if (insn
& (1 << 21))
1604 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1606 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1607 if (!(insn
& (1 << 20))) {
1608 iwmmxt_load_reg(cpu_V1
, wrd
);
1609 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1611 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1612 gen_op_iwmmxt_set_mup();
1614 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1615 wrd
= (insn
>> 12) & 0xf;
1616 rd0
= (insn
>> 16) & 0xf;
1617 rd1
= (insn
>> 0) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1619 switch ((insn
>> 22) & 3) {
1621 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1624 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1627 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1633 gen_op_iwmmxt_set_mup();
1634 gen_op_iwmmxt_set_cup();
1636 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1637 wrd
= (insn
>> 12) & 0xf;
1638 rd0
= (insn
>> 16) & 0xf;
1639 rd1
= (insn
>> 0) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1641 if (insn
& (1 << 22)) {
1642 if (insn
& (1 << 20))
1643 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1645 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1647 if (insn
& (1 << 20))
1648 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1650 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1652 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1656 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1657 wrd
= (insn
>> 12) & 0xf;
1658 rd0
= (insn
>> 16) & 0xf;
1659 rd1
= (insn
>> 0) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1661 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1662 tcg_gen_andi_i32(tmp
, tmp
, 7);
1663 iwmmxt_load_reg(cpu_V1
, rd1
);
1664 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1666 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1667 gen_op_iwmmxt_set_mup();
1669 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1670 if (((insn
>> 6) & 3) == 3)
1672 rd
= (insn
>> 12) & 0xf;
1673 wrd
= (insn
>> 16) & 0xf;
1674 tmp
= load_reg(s
, rd
);
1675 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1676 switch ((insn
>> 6) & 3) {
1678 tmp2
= tcg_const_i32(0xff);
1679 tmp3
= tcg_const_i32((insn
& 7) << 3);
1682 tmp2
= tcg_const_i32(0xffff);
1683 tmp3
= tcg_const_i32((insn
& 3) << 4);
1686 tmp2
= tcg_const_i32(0xffffffff);
1687 tmp3
= tcg_const_i32((insn
& 1) << 5);
1693 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1694 tcg_temp_free(tmp3
);
1695 tcg_temp_free(tmp2
);
1697 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1698 gen_op_iwmmxt_set_mup();
1700 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1701 rd
= (insn
>> 12) & 0xf;
1702 wrd
= (insn
>> 16) & 0xf;
1703 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1705 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1707 switch ((insn
>> 22) & 3) {
1709 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1710 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1712 tcg_gen_ext8s_i32(tmp
, tmp
);
1714 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1718 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1719 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1721 tcg_gen_ext16s_i32(tmp
, tmp
);
1723 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1727 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1728 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1731 store_reg(s
, rd
, tmp
);
1733 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1734 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1736 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1737 switch ((insn
>> 22) & 3) {
1739 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1742 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1745 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1748 tcg_gen_shli_i32(tmp
, tmp
, 28);
1752 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1753 if (((insn
>> 6) & 3) == 3)
1755 rd
= (insn
>> 12) & 0xf;
1756 wrd
= (insn
>> 16) & 0xf;
1757 tmp
= load_reg(s
, rd
);
1758 switch ((insn
>> 6) & 3) {
1760 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1763 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1766 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1770 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1771 gen_op_iwmmxt_set_mup();
1773 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1774 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1776 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1778 tcg_gen_mov_i32(tmp2
, tmp
);
1779 switch ((insn
>> 22) & 3) {
1781 for (i
= 0; i
< 7; i
++) {
1782 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1783 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1787 for (i
= 0; i
< 3; i
++) {
1788 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1789 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1793 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1794 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1801 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1802 wrd
= (insn
>> 12) & 0xf;
1803 rd0
= (insn
>> 16) & 0xf;
1804 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1805 switch ((insn
>> 22) & 3) {
1807 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1810 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1813 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1818 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1819 gen_op_iwmmxt_set_mup();
1821 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1822 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1824 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1826 tcg_gen_mov_i32(tmp2
, tmp
);
1827 switch ((insn
>> 22) & 3) {
1829 for (i
= 0; i
< 7; i
++) {
1830 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1831 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1835 for (i
= 0; i
< 3; i
++) {
1836 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1837 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1841 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1842 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1849 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1850 rd
= (insn
>> 12) & 0xf;
1851 rd0
= (insn
>> 16) & 0xf;
1852 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1854 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1856 switch ((insn
>> 22) & 3) {
1858 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1861 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1864 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1867 store_reg(s
, rd
, tmp
);
1869 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1870 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1871 wrd
= (insn
>> 12) & 0xf;
1872 rd0
= (insn
>> 16) & 0xf;
1873 rd1
= (insn
>> 0) & 0xf;
1874 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1875 switch ((insn
>> 22) & 3) {
1877 if (insn
& (1 << 21))
1878 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1880 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1883 if (insn
& (1 << 21))
1884 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1886 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1889 if (insn
& (1 << 21))
1890 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1892 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1897 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1898 gen_op_iwmmxt_set_mup();
1899 gen_op_iwmmxt_set_cup();
1901 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1902 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1903 wrd
= (insn
>> 12) & 0xf;
1904 rd0
= (insn
>> 16) & 0xf;
1905 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1906 switch ((insn
>> 22) & 3) {
1908 if (insn
& (1 << 21))
1909 gen_op_iwmmxt_unpacklsb_M0();
1911 gen_op_iwmmxt_unpacklub_M0();
1914 if (insn
& (1 << 21))
1915 gen_op_iwmmxt_unpacklsw_M0();
1917 gen_op_iwmmxt_unpackluw_M0();
1920 if (insn
& (1 << 21))
1921 gen_op_iwmmxt_unpacklsl_M0();
1923 gen_op_iwmmxt_unpacklul_M0();
1928 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1929 gen_op_iwmmxt_set_mup();
1930 gen_op_iwmmxt_set_cup();
1932 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1933 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1934 wrd
= (insn
>> 12) & 0xf;
1935 rd0
= (insn
>> 16) & 0xf;
1936 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1937 switch ((insn
>> 22) & 3) {
1939 if (insn
& (1 << 21))
1940 gen_op_iwmmxt_unpackhsb_M0();
1942 gen_op_iwmmxt_unpackhub_M0();
1945 if (insn
& (1 << 21))
1946 gen_op_iwmmxt_unpackhsw_M0();
1948 gen_op_iwmmxt_unpackhuw_M0();
1951 if (insn
& (1 << 21))
1952 gen_op_iwmmxt_unpackhsl_M0();
1954 gen_op_iwmmxt_unpackhul_M0();
1959 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1960 gen_op_iwmmxt_set_mup();
1961 gen_op_iwmmxt_set_cup();
1963 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1964 case 0x214: case 0x614: case 0xa14: case 0xe14:
1965 if (((insn
>> 22) & 3) == 0)
1967 wrd
= (insn
>> 12) & 0xf;
1968 rd0
= (insn
>> 16) & 0xf;
1969 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1971 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
1975 switch ((insn
>> 22) & 3) {
1977 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1980 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1983 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1987 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1988 gen_op_iwmmxt_set_mup();
1989 gen_op_iwmmxt_set_cup();
1991 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1992 case 0x014: case 0x414: case 0x814: case 0xc14:
1993 if (((insn
>> 22) & 3) == 0)
1995 wrd
= (insn
>> 12) & 0xf;
1996 rd0
= (insn
>> 16) & 0xf;
1997 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1999 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2003 switch ((insn
>> 22) & 3) {
2005 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2008 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2011 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2015 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2019 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2020 case 0x114: case 0x514: case 0x914: case 0xd14:
2021 if (((insn
>> 22) & 3) == 0)
2023 wrd
= (insn
>> 12) & 0xf;
2024 rd0
= (insn
>> 16) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2027 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2031 switch ((insn
>> 22) & 3) {
2033 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2036 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2039 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2043 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2047 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2048 case 0x314: case 0x714: case 0xb14: case 0xf14:
2049 if (((insn
>> 22) & 3) == 0)
2051 wrd
= (insn
>> 12) & 0xf;
2052 rd0
= (insn
>> 16) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2055 switch ((insn
>> 22) & 3) {
2057 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2061 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2064 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2068 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2071 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2075 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2079 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2083 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2084 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2085 wrd
= (insn
>> 12) & 0xf;
2086 rd0
= (insn
>> 16) & 0xf;
2087 rd1
= (insn
>> 0) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2089 switch ((insn
>> 22) & 3) {
2091 if (insn
& (1 << 21))
2092 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2094 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2097 if (insn
& (1 << 21))
2098 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2100 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2103 if (insn
& (1 << 21))
2104 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2106 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2111 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2112 gen_op_iwmmxt_set_mup();
2114 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2115 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2116 wrd
= (insn
>> 12) & 0xf;
2117 rd0
= (insn
>> 16) & 0xf;
2118 rd1
= (insn
>> 0) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2120 switch ((insn
>> 22) & 3) {
2122 if (insn
& (1 << 21))
2123 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2125 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2128 if (insn
& (1 << 21))
2129 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2131 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2134 if (insn
& (1 << 21))
2135 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2137 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2142 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2143 gen_op_iwmmxt_set_mup();
2145 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2146 case 0x402: case 0x502: case 0x602: case 0x702:
2147 wrd
= (insn
>> 12) & 0xf;
2148 rd0
= (insn
>> 16) & 0xf;
2149 rd1
= (insn
>> 0) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2151 tmp
= tcg_const_i32((insn
>> 20) & 3);
2152 iwmmxt_load_reg(cpu_V1
, rd1
);
2153 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2155 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2156 gen_op_iwmmxt_set_mup();
2158 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2159 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2160 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2161 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2162 wrd
= (insn
>> 12) & 0xf;
2163 rd0
= (insn
>> 16) & 0xf;
2164 rd1
= (insn
>> 0) & 0xf;
2165 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2166 switch ((insn
>> 20) & 0xf) {
2168 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2171 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2174 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2177 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2180 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2183 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2186 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2189 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2192 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2197 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2198 gen_op_iwmmxt_set_mup();
2199 gen_op_iwmmxt_set_cup();
2201 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2202 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2203 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2204 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2205 wrd
= (insn
>> 12) & 0xf;
2206 rd0
= (insn
>> 16) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2208 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2209 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2211 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2212 gen_op_iwmmxt_set_mup();
2213 gen_op_iwmmxt_set_cup();
2215 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2216 case 0x418: case 0x518: case 0x618: case 0x718:
2217 case 0x818: case 0x918: case 0xa18: case 0xb18:
2218 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2219 wrd
= (insn
>> 12) & 0xf;
2220 rd0
= (insn
>> 16) & 0xf;
2221 rd1
= (insn
>> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2223 switch ((insn
>> 20) & 0xf) {
2225 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2228 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2231 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2234 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2237 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2240 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2243 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2246 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2249 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2258 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2259 case 0x408: case 0x508: case 0x608: case 0x708:
2260 case 0x808: case 0x908: case 0xa08: case 0xb08:
2261 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2262 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2264 wrd
= (insn
>> 12) & 0xf;
2265 rd0
= (insn
>> 16) & 0xf;
2266 rd1
= (insn
>> 0) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2268 switch ((insn
>> 22) & 3) {
2270 if (insn
& (1 << 21))
2271 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2273 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2276 if (insn
& (1 << 21))
2277 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2279 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2282 if (insn
& (1 << 21))
2283 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2285 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2288 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2289 gen_op_iwmmxt_set_mup();
2290 gen_op_iwmmxt_set_cup();
2292 case 0x201: case 0x203: case 0x205: case 0x207:
2293 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2294 case 0x211: case 0x213: case 0x215: case 0x217:
2295 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2296 wrd
= (insn
>> 5) & 0xf;
2297 rd0
= (insn
>> 12) & 0xf;
2298 rd1
= (insn
>> 0) & 0xf;
2299 if (rd0
== 0xf || rd1
== 0xf)
2301 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2302 tmp
= load_reg(s
, rd0
);
2303 tmp2
= load_reg(s
, rd1
);
2304 switch ((insn
>> 16) & 0xf) {
2305 case 0x0: /* TMIA */
2306 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2308 case 0x8: /* TMIAPH */
2309 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2311 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2312 if (insn
& (1 << 16))
2313 tcg_gen_shri_i32(tmp
, tmp
, 16);
2314 if (insn
& (1 << 17))
2315 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2316 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2325 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2326 gen_op_iwmmxt_set_mup();
2335 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2336    (i.e. an undefined instruction). */
2337 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2339 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2342 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2343 /* Multiply with Internal Accumulate Format */
2344 rd0
= (insn
>> 12) & 0xf;
2346 acc
= (insn
>> 5) & 7;
2351 tmp
= load_reg(s
, rd0
);
2352 tmp2
= load_reg(s
, rd1
);
2353 switch ((insn
>> 16) & 0xf) {
2355 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2357 case 0x8: /* MIAPH */
2358 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2360 case 0xc: /* MIABB */
2361 case 0xd: /* MIABT */
2362 case 0xe: /* MIATB */
2363 case 0xf: /* MIATT */
2364 if (insn
& (1 << 16))
2365 tcg_gen_shri_i32(tmp
, tmp
, 16);
2366 if (insn
& (1 << 17))
2367 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2368 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2376 gen_op_iwmmxt_movq_wRn_M0(acc
);
2380 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2381 /* Internal Accumulator Access Format */
2382 rdhi
= (insn
>> 16) & 0xf;
2383 rdlo
= (insn
>> 12) & 0xf;
2389 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2390 iwmmxt_load_reg(cpu_V0
, acc
);
2391 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2392 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2393 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2394 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2396 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2397 iwmmxt_store_reg(cpu_V0
, acc
);
2405 /* Disassemble system coprocessor instruction. Return nonzero if
2406 instruction is not defined. */
2407 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2410 uint32_t rd
= (insn
>> 12) & 0xf;
2411 uint32_t cp
= (insn
>> 8) & 0xf;
2416 if (insn
& ARM_CP_RW_BIT
) {
2417 if (!env
->cp
[cp
].cp_read
)
2419 gen_set_pc_im(s
->pc
);
2421 tmp2
= tcg_const_i32(insn
);
2422 gen_helper_get_cp(tmp
, cpu_env
, tmp2
);
2423 tcg_temp_free(tmp2
);
2424 store_reg(s
, rd
, tmp
);
2426 if (!env
->cp
[cp
].cp_write
)
2428 gen_set_pc_im(s
->pc
);
2429 tmp
= load_reg(s
, rd
);
2430 tmp2
= tcg_const_i32(insn
);
2431 gen_helper_set_cp(cpu_env
, tmp2
, tmp
);
2432 tcg_temp_free(tmp2
);
2438 static int cp15_user_ok(uint32_t insn
)
2440 int cpn
= (insn
>> 16) & 0xf;
2441 int cpm
= insn
& 0xf;
2442 int op
= ((insn
>> 5) & 7) | ((insn
>> 18) & 0x38);
2444 if (cpn
== 13 && cpm
== 0) {
2446 if (op
== 2 || (op
== 3 && (insn
& ARM_CP_RW_BIT
)))
2450 /* ISB, DSB, DMB. */
2451 if ((cpm
== 5 && op
== 4)
2452 || (cpm
== 10 && (op
== 4 || op
== 5)))
2458 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2459 instruction is not defined. */
2460 static int disas_cp15_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2465 /* M profile cores use memory mapped registers instead of cp15. */
2466 if (arm_feature(env
, ARM_FEATURE_M
))
2469 if ((insn
& (1 << 25)) == 0) {
2470 if (insn
& (1 << 20)) {
2474 /* mcrr. Used for block cache operations, so implement as no-op. */
2477 if ((insn
& (1 << 4)) == 0) {
2481 if (IS_USER(s
) && !cp15_user_ok(insn
)) {
2484 if ((insn
& 0x0fff0fff) == 0x0e070f90
2485 || (insn
& 0x0fff0fff) == 0x0e070f58) {
2486 /* Wait for interrupt. */
2487 gen_set_pc_im(s
->pc
);
2488 s
->is_jmp
= DISAS_WFI
;
2491 rd
= (insn
>> 12) & 0xf;
2492 tmp2
= tcg_const_i32(insn
);
2493 if (insn
& ARM_CP_RW_BIT
) {
2495 gen_helper_get_cp15(tmp
, cpu_env
, tmp2
);
2496 /* If the destination register is r15 then the condition codes are set. */
2498 store_reg(s
, rd
, tmp
);
2502 tmp
= load_reg(s
, rd
);
2503 gen_helper_set_cp15(cpu_env
, tmp2
, tmp
);
2505 /* Normally we would always end the TB here, but Linux
2506 * arch/arm/mach-pxa/sleep.S expects two instructions following
2507 * an MMU enable to execute from cache. Imitate this behaviour. */
2508 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) ||
2509 (insn
& 0x0fff0fff) != 0x0e010f10)
2512 tcg_temp_free_i32(tmp2
);
2516 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2517 #define VFP_SREG(insn, bigbit, smallbit) \
2518 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2519 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2520 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2521 reg = (((insn) >> (bigbit)) & 0x0f) \
2522 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2524 if (insn & (1 << (smallbit))) \
2526 reg = ((insn) >> (bigbit)) & 0x0f; \
2529 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2530 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2531 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2532 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2533 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2534 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
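/* Editor's note (illustrative, hedged): VFP encodes a single-precision
 * register number as a 4-bit field plus one extra low bit elsewhere in the
 * instruction, so VFP_SREG(insn, 16, 7) yields rn = (bits[19:16] << 1) |
 * bit 7 -- e.g. bits[19:16] = 3 with bit 7 set selects s7.  For double
 * precision the extra bit only exists on VFP3 (d0-d31), so VFP_DREG folds
 * it in as bit 4 there and otherwise requires it to be zero.
 */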
2536 /* Move between integer and VFP cores. */
2537 static TCGv
gen_vfp_mrs(void)
2539 TCGv tmp
= new_tmp();
2540 tcg_gen_mov_i32(tmp
, cpu_F0s
);
2544 static void gen_vfp_msr(TCGv tmp
)
2546 tcg_gen_mov_i32(cpu_F0s
, tmp
);
2551 vfp_enabled(CPUState
* env
)
2553 return ((env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) != 0);
2556 static void gen_neon_dup_u8(TCGv var
, int shift
)
2558 TCGv tmp
= new_tmp();
2560 tcg_gen_shri_i32(var
, var
, shift
);
2561 tcg_gen_ext8u_i32(var
, var
);
2562 tcg_gen_shli_i32(tmp
, var
, 8);
2563 tcg_gen_or_i32(var
, var
, tmp
);
2564 tcg_gen_shli_i32(tmp
, var
, 16);
2565 tcg_gen_or_i32(var
, var
, tmp
);
2569 static void gen_neon_dup_low16(TCGv var
)
2571 TCGv tmp
= new_tmp();
2572 tcg_gen_ext16u_i32(var
, var
);
2573 tcg_gen_shli_i32(tmp
, var
, 16);
2574 tcg_gen_or_i32(var
, var
, tmp
);
2578 static void gen_neon_dup_high16(TCGv var
)
2580 TCGv tmp
= new_tmp();
2581 tcg_gen_andi_i32(var
, var
, 0xffff0000);
2582 tcg_gen_shri_i32(tmp
, var
, 16);
2583 tcg_gen_or_i32(var
, var
, tmp
);
2587 /* Disassemble a VFP instruction. Returns nonzero if an error occured
2588 (ie. an undefined instruction). */
2589 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
2591 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
2597 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2600 if (!vfp_enabled(env
)) {
2601 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2602 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2604 rn
= (insn
>> 16) & 0xf;
2605 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2606 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2609 dp
= ((insn
& 0xf00) == 0xb00);
2610 switch ((insn
>> 24) & 0xf) {
2612 if (insn
& (1 << 4)) {
2613 /* single register transfer */
2614 rd
= (insn
>> 12) & 0xf;
2619 VFP_DREG_N(rn
, insn
);
2622 if (insn
& 0x00c00060
2623 && !arm_feature(env
, ARM_FEATURE_NEON
))
2626 pass
= (insn
>> 21) & 1;
2627 if (insn
& (1 << 22)) {
2629 offset
= ((insn
>> 5) & 3) * 8;
2630 } else if (insn
& (1 << 5)) {
2632 offset
= (insn
& (1 << 6)) ? 16 : 0;
2637 if (insn
& ARM_CP_RW_BIT
) {
2639 tmp
= neon_load_reg(rn
, pass
);
2643 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2644 if (insn
& (1 << 23))
2650 if (insn
& (1 << 23)) {
2652 tcg_gen_shri_i32(tmp
, tmp
, 16);
2658 tcg_gen_sari_i32(tmp
, tmp
, 16);
2667 store_reg(s
, rd
, tmp
);
2670 tmp
= load_reg(s
, rd
);
2671 if (insn
& (1 << 23)) {
2674 gen_neon_dup_u8(tmp
, 0);
2675 } else if (size
== 1) {
2676 gen_neon_dup_low16(tmp
);
2678 for (n
= 0; n
<= pass
* 2; n
++) {
2680 tcg_gen_mov_i32(tmp2
, tmp
);
2681 neon_store_reg(rn
, n
, tmp2
);
2683 neon_store_reg(rn
, n
, tmp
);
2688 tmp2
= neon_load_reg(rn
, pass
);
2689 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2693 tmp2
= neon_load_reg(rn
, pass
);
2694 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2700 neon_store_reg(rn
, pass
, tmp
);
2704 if ((insn
& 0x6f) != 0x00)
2706 rn
= VFP_SREG_N(insn
);
2707 if (insn
& ARM_CP_RW_BIT
) {
2709 if (insn
& (1 << 21)) {
2710 /* system register */
2715 /* VFP2 allows access to FSID from userspace.
2716 VFP3 restricts all id registers to privileged
2719 && arm_feature(env
, ARM_FEATURE_VFP3
))
2721 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2726 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2728 case ARM_VFP_FPINST
:
2729 case ARM_VFP_FPINST2
:
2730 /* Not present in VFP3. */
2732 || arm_feature(env
, ARM_FEATURE_VFP3
))
2734 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2738 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2739 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2742 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2748 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2750 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2756 gen_mov_F0_vreg(0, rn
);
2757 tmp
= gen_vfp_mrs();
2760 /* Set the 4 flag bits in the CPSR. */
2764 store_reg(s
, rd
, tmp
);
2768 tmp
= load_reg(s
, rd
);
2769 if (insn
& (1 << 21)) {
2771 /* system register */
2776 /* Writes are ignored. */
2779 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2786 /* TODO: VFP subarchitecture support.
2787 * For now, keep the EN bit only */
2788 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2789 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2792 case ARM_VFP_FPINST
:
2793 case ARM_VFP_FPINST2
:
2794 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2801 gen_mov_vreg_F0(0, rn
);
2806 /* data processing */
2807 /* The opcode is in bits 23, 21, 20 and 6. */
2808 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2812 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2814 /* rn is register number */
2815 VFP_DREG_N(rn
, insn
);
2818 if (op
== 15 && (rn
== 15 || rn
> 17)) {
2819 /* Integer or single precision destination. */
2820 rd
= VFP_SREG_D(insn
);
2822 VFP_DREG_D(rd
, insn
);
2825 if (op
== 15 && (rn
== 16 || rn
== 17)) {
2826 /* Integer source. */
2827 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2829 VFP_DREG_M(rm
, insn
);
2832 rn
= VFP_SREG_N(insn
);
2833 if (op
== 15 && rn
== 15) {
2834 /* Double precision destination. */
2835 VFP_DREG_D(rd
, insn
);
2837 rd
= VFP_SREG_D(insn
);
2839 rm
= VFP_SREG_M(insn
);
2842 veclen
= env
->vfp
.vec_len
;
2843 if (op
== 15 && rn
> 3)
2846 /* Shut up compiler warnings. */
2857 /* Figure out what type of vector operation this is. */
2858 if ((rd
& bank_mask
) == 0) {
2863 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
2865 delta_d
= env
->vfp
.vec_stride
+ 1;
2867 if ((rm
& bank_mask
) == 0) {
2868 /* mixed scalar/vector */
2877 /* Load the initial operands. */
2882 /* Integer source */
2883 gen_mov_F0_vreg(0, rm
);
2888 gen_mov_F0_vreg(dp
, rd
);
2889 gen_mov_F1_vreg(dp
, rm
);
2893 /* Compare with zero */
2894 gen_mov_F0_vreg(dp
, rd
);
2905 /* Source and destination the same. */
2906 gen_mov_F0_vreg(dp
, rd
);
2909 /* One source operand. */
2910 gen_mov_F0_vreg(dp
, rm
);
2914 /* Two source operands. */
2915 gen_mov_F0_vreg(dp
, rn
);
2916 gen_mov_F1_vreg(dp
, rm
);
2920 /* Perform the calculation. */
2922 case 0: /* mac: fd + (fn * fm) */
2924 gen_mov_F1_vreg(dp
, rd
);
2927 case 1: /* nmac: fd - (fn * fm) */
2930 gen_mov_F1_vreg(dp
, rd
);
2933 case 2: /* msc: -fd + (fn * fm) */
2935 gen_mov_F1_vreg(dp
, rd
);
2938 case 3: /* nmsc: -fd - (fn * fm) */
2941 gen_mov_F1_vreg(dp
, rd
);
2944 case 4: /* mul: fn * fm */
2947 case 5: /* nmul: -(fn * fm) */
2951 case 6: /* add: fn + fm */
2954 case 7: /* sub: fn - fm */
2957 case 8: /* div: fn / fm */
2960 case 14: /* fconst */
2961 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2964 n
= (insn
<< 12) & 0x80000000;
2965 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
2972 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
2979 tcg_gen_movi_i32(cpu_F0s
, n
);
2982 case 15: /* extension space */
2996 case 4: /* vcvtb.f32.f16 */
2997 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
2999 tmp
= gen_vfp_mrs();
3000 tcg_gen_ext16u_i32(tmp
, tmp
);
3001 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3004 case 5: /* vcvtt.f32.f16 */
3005 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3007 tmp
= gen_vfp_mrs();
3008 tcg_gen_shri_i32(tmp
, tmp
, 16);
3009 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3012 case 6: /* vcvtb.f16.f32 */
3013 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3016 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3017 gen_mov_F0_vreg(0, rd
);
3018 tmp2
= gen_vfp_mrs();
3019 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3020 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3024 case 7: /* vcvtt.f16.f32 */
3025 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3028 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3029 tcg_gen_shli_i32(tmp
, tmp
, 16);
3030 gen_mov_F0_vreg(0, rd
);
3031 tmp2
= gen_vfp_mrs();
3032 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3033 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3046 case 11: /* cmpez */
3050 case 15: /* single<->double conversion */
3052 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3054 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3056 case 16: /* fuito */
3059 case 17: /* fsito */
3062 case 20: /* fshto */
3063 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3065 gen_vfp_shto(dp
, 16 - rm
);
3067 case 21: /* fslto */
3068 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3070 gen_vfp_slto(dp
, 32 - rm
);
3072 case 22: /* fuhto */
3073 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3075 gen_vfp_uhto(dp
, 16 - rm
);
3077 case 23: /* fulto */
3078 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3080 gen_vfp_ulto(dp
, 32 - rm
);
3082 case 24: /* ftoui */
3085 case 25: /* ftouiz */
3088 case 26: /* ftosi */
3091 case 27: /* ftosiz */
3094 case 28: /* ftosh */
3095 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3097 gen_vfp_tosh(dp
, 16 - rm
);
3099 case 29: /* ftosl */
3100 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3102 gen_vfp_tosl(dp
, 32 - rm
);
3104 case 30: /* ftouh */
3105 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3107 gen_vfp_touh(dp
, 16 - rm
);
3109 case 31: /* ftoul */
3110 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3112 gen_vfp_toul(dp
, 32 - rm
);
3114 default: /* undefined */
3115 printf ("rn:%d\n", rn
);
3119 default: /* undefined */
3120 printf ("op:%d\n", op
);
3124 /* Write back the result. */
3125 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3126 ; /* Comparison, do nothing. */
3127 else if (op
== 15 && rn
> 17)
3128 /* Integer result. */
3129 gen_mov_vreg_F0(0, rd
);
3130 else if (op
== 15 && rn
== 15)
3132 gen_mov_vreg_F0(!dp
, rd
);
3134 gen_mov_vreg_F0(dp
, rd
);
3136 /* break out of the loop if we have finished */
3140 if (op
== 15 && delta_m
== 0) {
3141 /* single source one-many */
3143 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3145 gen_mov_vreg_F0(dp
, rd
);
3149 /* Setup the next operands. */
3151 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3155 /* One source operand. */
3156 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3158 gen_mov_F0_vreg(dp
, rm
);
3160 /* Two source operands. */
3161 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3163 gen_mov_F0_vreg(dp
, rn
);
3165 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3167 gen_mov_F1_vreg(dp
, rm
);
3175 if (dp
&& (insn
& 0x03e00000) == 0x00400000) {
3176 /* two-register transfer */
3177 rn
= (insn
>> 16) & 0xf;
3178 rd
= (insn
>> 12) & 0xf;
3180 VFP_DREG_M(rm
, insn
);
3182 rm
= VFP_SREG_M(insn
);
3185 if (insn
& ARM_CP_RW_BIT
) {
3188 gen_mov_F0_vreg(0, rm
* 2);
3189 tmp
= gen_vfp_mrs();
3190 store_reg(s
, rd
, tmp
);
3191 gen_mov_F0_vreg(0, rm
* 2 + 1);
3192 tmp
= gen_vfp_mrs();
3193 store_reg(s
, rn
, tmp
);
3195 gen_mov_F0_vreg(0, rm
);
3196 tmp
= gen_vfp_mrs();
3197 store_reg(s
, rn
, tmp
);
3198 gen_mov_F0_vreg(0, rm
+ 1);
3199 tmp
= gen_vfp_mrs();
3200 store_reg(s
, rd
, tmp
);
3205 tmp
= load_reg(s
, rd
);
3207 gen_mov_vreg_F0(0, rm
* 2);
3208 tmp
= load_reg(s
, rn
);
3210 gen_mov_vreg_F0(0, rm
* 2 + 1);
3212 tmp
= load_reg(s
, rn
);
3214 gen_mov_vreg_F0(0, rm
);
3215 tmp
= load_reg(s
, rd
);
3217 gen_mov_vreg_F0(0, rm
+ 1);
3222 rn
= (insn
>> 16) & 0xf;
3224 VFP_DREG_D(rd
, insn
);
3226 rd
= VFP_SREG_D(insn
);
3227 if (s
->thumb
&& rn
== 15) {
3229 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3231 addr
= load_reg(s
, rn
);
3233 if ((insn
& 0x01200000) == 0x01000000) {
3234 /* Single load/store */
3235 offset
= (insn
& 0xff) << 2;
3236 if ((insn
& (1 << 23)) == 0)
3238 tcg_gen_addi_i32(addr
, addr
, offset
);
3239 if (insn
& (1 << 20)) {
3240 gen_vfp_ld(s
, dp
, addr
);
3241 gen_mov_vreg_F0(dp
, rd
);
3243 gen_mov_F0_vreg(dp
, rd
);
3244 gen_vfp_st(s
, dp
, addr
);
3248 /* load/store multiple */
3250 n
= (insn
>> 1) & 0x7f;
3254 if (insn
& (1 << 24)) /* pre-decrement */
3255 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3261 for (i
= 0; i
< n
; i
++) {
3262 if (insn
& ARM_CP_RW_BIT
) {
3264 gen_vfp_ld(s
, dp
, addr
);
3265 gen_mov_vreg_F0(dp
, rd
+ i
);
3268 gen_mov_F0_vreg(dp
, rd
+ i
);
3269 gen_vfp_st(s
, dp
, addr
);
3271 tcg_gen_addi_i32(addr
, addr
, offset
);
3273 if (insn
& (1 << 21)) {
3275 if (insn
& (1 << 24))
3276 offset
= -offset
* n
;
3277 else if (dp
&& (insn
& 1))
3283 tcg_gen_addi_i32(addr
, addr
, offset
);
3284 store_reg(s
, rn
, addr
);
3292 /* Should never happen. */
3298 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
3300 TranslationBlock
*tb
;
3303 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
3305 gen_set_pc_im(dest
);
3306 tcg_gen_exit_tb((long)tb
+ n
);
3308 gen_set_pc_im(dest
);
3313 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
3315 if (unlikely(s
->singlestep_enabled
)) {
3316 /* An indirect jump so that we still trigger the debug exception. */
3321 gen_goto_tb(s
, 0, dest
);
3322 s
->is_jmp
= DISAS_TB_JUMP
;
3326 static inline void gen_mulxy(TCGv t0
, TCGv t1
, int x
, int y
)
3329 tcg_gen_sari_i32(t0
, t0
, 16);
3333 tcg_gen_sari_i32(t1
, t1
, 16);
3336 tcg_gen_mul_i32(t0
, t0
, t1
);
3339 /* Return the mask of PSR bits set by a MSR instruction. */
3340 static uint32_t msr_mask(CPUState
*env
, DisasContext
*s
, int flags
, int spsr
) {
3344 if (flags
& (1 << 0))
3346 if (flags
& (1 << 1))
3348 if (flags
& (1 << 2))
3350 if (flags
& (1 << 3))
3353 /* Mask out undefined bits. */
3354 mask
&= ~CPSR_RESERVED
;
3355 if (!arm_feature(env
, ARM_FEATURE_V6
))
3356 mask
&= ~(CPSR_E
| CPSR_GE
);
3357 if (!arm_feature(env
, ARM_FEATURE_THUMB2
))
3359 /* Mask out execution state bits. */
3362 /* Mask out privileged bits. */
3368 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3369 static int gen_set_psr(DisasContext
*s
, uint32_t mask
, int spsr
, TCGv t0
)
3373 /* ??? This is also undefined in system mode. */
3377 tmp
= load_cpu_field(spsr
);
3378 tcg_gen_andi_i32(tmp
, tmp
, ~mask
);
3379 tcg_gen_andi_i32(t0
, t0
, mask
);
3380 tcg_gen_or_i32(tmp
, tmp
, t0
);
3381 store_cpu_field(tmp
, spsr
);
3383 gen_set_cpsr(t0
, mask
);
3390 /* Returns nonzero if access to the PSR is not permitted. */
3391 static int gen_set_psr_im(DisasContext
*s
, uint32_t mask
, int spsr
, uint32_t val
)
3395 tcg_gen_movi_i32(tmp
, val
);
3396 return gen_set_psr(s
, mask
, spsr
, tmp
);
3399 /* Generate an old-style exception return. Marks pc as dead. */
3400 static void gen_exception_return(DisasContext
*s
, TCGv pc
)
3403 store_reg(s
, 15, pc
);
3404 tmp
= load_cpu_field(spsr
);
3405 gen_set_cpsr(tmp
, 0xffffffff);
3407 s
->is_jmp
= DISAS_UPDATE
;
3410 /* Generate a v6 exception return. Marks both values as dead. */
3411 static void gen_rfe(DisasContext
*s
, TCGv pc
, TCGv cpsr
)
3413 gen_set_cpsr(cpsr
, 0xffffffff);
3415 store_reg(s
, 15, pc
);
3416 s
->is_jmp
= DISAS_UPDATE
;
3420 gen_set_condexec (DisasContext
*s
)
3422 if (s
->condexec_mask
) {
3423 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
3424 TCGv tmp
= new_tmp();
3425 tcg_gen_movi_i32(tmp
, val
);
3426 store_cpu_field(tmp
, condexec_bits
);
3430 static void gen_nop_hint(DisasContext
*s
, int val
)
3434 gen_set_pc_im(s
->pc
);
3435 s
->is_jmp
= DISAS_WFI
;
3439 /* TODO: Implement SEV and WFE. May help SMP performance. */
3445 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3447 static inline int gen_neon_add(int size
, TCGv t0
, TCGv t1
)
3450 case 0: gen_helper_neon_add_u8(t0
, t0
, t1
); break;
3451 case 1: gen_helper_neon_add_u16(t0
, t0
, t1
); break;
3452 case 2: tcg_gen_add_i32(t0
, t0
, t1
); break;
3458 static inline void gen_neon_rsb(int size
, TCGv t0
, TCGv t1
)
3461 case 0: gen_helper_neon_sub_u8(t0
, t1
, t0
); break;
3462 case 1: gen_helper_neon_sub_u16(t0
, t1
, t0
); break;
3463 case 2: tcg_gen_sub_i32(t0
, t1
, t0
); break;
3468 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3469 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3470 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3471 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3472 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3474 /* FIXME: This is wrong. They set the wrong overflow bit. */
3475 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3476 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3477 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3478 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3480 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3481 switch ((size << 1) | u) { \
3483 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3486 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3489 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3492 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3495 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3498 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3500 default: return 1; \
3503 #define GEN_NEON_INTEGER_OP(name) do { \
3504 switch ((size << 1) | u) { \
3506 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3509 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3512 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3515 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3518 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3521 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3523 default: return 1; \
3526 static TCGv
neon_load_scratch(int scratch
)
3528 TCGv tmp
= new_tmp();
3529 tcg_gen_ld_i32(tmp
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3533 static void neon_store_scratch(int scratch
, TCGv var
)
3535 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3539 static inline TCGv
neon_get_scalar(int size
, int reg
)
3543 tmp
= neon_load_reg(reg
>> 1, reg
& 1);
3545 tmp
= neon_load_reg(reg
>> 2, (reg
>> 1) & 1);
3547 gen_neon_dup_low16(tmp
);
3549 gen_neon_dup_high16(tmp
);
3555 static void gen_neon_unzip_u8(TCGv t0
, TCGv t1
)
3563 tcg_gen_andi_i32(rd
, t0
, 0xff);
3564 tcg_gen_shri_i32(tmp
, t0
, 8);
3565 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3566 tcg_gen_or_i32(rd
, rd
, tmp
);
3567 tcg_gen_shli_i32(tmp
, t1
, 16);
3568 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3569 tcg_gen_or_i32(rd
, rd
, tmp
);
3570 tcg_gen_shli_i32(tmp
, t1
, 8);
3571 tcg_gen_andi_i32(tmp
, tmp
, 0xff000000);
3572 tcg_gen_or_i32(rd
, rd
, tmp
);
3574 tcg_gen_shri_i32(rm
, t0
, 8);
3575 tcg_gen_andi_i32(rm
, rm
, 0xff);
3576 tcg_gen_shri_i32(tmp
, t0
, 16);
3577 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3578 tcg_gen_or_i32(rm
, rm
, tmp
);
3579 tcg_gen_shli_i32(tmp
, t1
, 8);
3580 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3581 tcg_gen_or_i32(rm
, rm
, tmp
);
3582 tcg_gen_andi_i32(tmp
, t1
, 0xff000000);
3583 tcg_gen_or_i32(t1
, rm
, tmp
);
3584 tcg_gen_mov_i32(t0
, rd
);
3591 static void gen_neon_zip_u8(TCGv t0
, TCGv t1
)
3599 tcg_gen_andi_i32(rd
, t0
, 0xff);
3600 tcg_gen_shli_i32(tmp
, t1
, 8);
3601 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3602 tcg_gen_or_i32(rd
, rd
, tmp
);
3603 tcg_gen_shli_i32(tmp
, t0
, 16);
3604 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3605 tcg_gen_or_i32(rd
, rd
, tmp
);
3606 tcg_gen_shli_i32(tmp
, t1
, 24);
3607 tcg_gen_andi_i32(tmp
, tmp
, 0xff000000);
3608 tcg_gen_or_i32(rd
, rd
, tmp
);
3610 tcg_gen_andi_i32(rm
, t1
, 0xff000000);
3611 tcg_gen_shri_i32(tmp
, t0
, 8);
3612 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3613 tcg_gen_or_i32(rm
, rm
, tmp
);
3614 tcg_gen_shri_i32(tmp
, t1
, 8);
3615 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3616 tcg_gen_or_i32(rm
, rm
, tmp
);
3617 tcg_gen_shri_i32(tmp
, t0
, 16);
3618 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
3619 tcg_gen_or_i32(t1
, rm
, tmp
);
3620 tcg_gen_mov_i32(t0
, rd
);
3627 static void gen_neon_zip_u16(TCGv t0
, TCGv t1
)
3634 tcg_gen_andi_i32(tmp
, t0
, 0xffff);
3635 tcg_gen_shli_i32(tmp2
, t1
, 16);
3636 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3637 tcg_gen_andi_i32(t1
, t1
, 0xffff0000);
3638 tcg_gen_shri_i32(tmp2
, t0
, 16);
3639 tcg_gen_or_i32(t1
, t1
, tmp2
);
3640 tcg_gen_mov_i32(t0
, tmp
);
3646 static void gen_neon_unzip(int reg
, int q
, int tmp
, int size
)
3651 for (n
= 0; n
< q
+ 1; n
+= 2) {
3652 t0
= neon_load_reg(reg
, n
);
3653 t1
= neon_load_reg(reg
, n
+ 1);
3655 case 0: gen_neon_unzip_u8(t0
, t1
); break;
3656 case 1: gen_neon_zip_u16(t0
, t1
); break; /* zip and unzip are the same. */
3657 case 2: /* no-op */; break;
3660 neon_store_scratch(tmp
+ n
, t0
);
3661 neon_store_scratch(tmp
+ n
+ 1, t1
);
3665 static void gen_neon_trn_u8(TCGv t0
, TCGv t1
)
3672 tcg_gen_shli_i32(rd
, t0
, 8);
3673 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
3674 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
3675 tcg_gen_or_i32(rd
, rd
, tmp
);
3677 tcg_gen_shri_i32(t1
, t1
, 8);
3678 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
3679 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
3680 tcg_gen_or_i32(t1
, t1
, tmp
);
3681 tcg_gen_mov_i32(t0
, rd
);
3687 static void gen_neon_trn_u16(TCGv t0
, TCGv t1
)
3694 tcg_gen_shli_i32(rd
, t0
, 16);
3695 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
3696 tcg_gen_or_i32(rd
, rd
, tmp
);
3697 tcg_gen_shri_i32(t1
, t1
, 16);
3698 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
3699 tcg_gen_or_i32(t1
, t1
, tmp
);
3700 tcg_gen_mov_i32(t0
, rd
);
3711 } neon_ls_element_type
[11] = {
3725 /* Translate a NEON load/store element instruction. Return nonzero if the
3726 instruction is invalid. */
3727 static int disas_neon_ls_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
3746 if (!vfp_enabled(env
))
3748 VFP_DREG_D(rd
, insn
);
3749 rn
= (insn
>> 16) & 0xf;
3751 load
= (insn
& (1 << 21)) != 0;
3753 if ((insn
& (1 << 23)) == 0) {
3754 /* Load store all elements. */
3755 op
= (insn
>> 8) & 0xf;
3756 size
= (insn
>> 6) & 3;
3759 nregs
= neon_ls_element_type
[op
].nregs
;
3760 interleave
= neon_ls_element_type
[op
].interleave
;
3761 spacing
= neon_ls_element_type
[op
].spacing
;
3762 if (size
== 3 && (interleave
| spacing
) != 1)
3764 load_reg_var(s
, addr
, rn
);
3765 stride
= (1 << size
) * interleave
;
3766 for (reg
= 0; reg
< nregs
; reg
++) {
3767 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3768 load_reg_var(s
, addr
, rn
);
3769 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3770 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3771 load_reg_var(s
, addr
, rn
);
3772 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3776 tmp64
= gen_ld64(addr
, IS_USER(s
));
3777 neon_store_reg64(tmp64
, rd
);
3778 tcg_temp_free_i64(tmp64
);
3780 tmp64
= tcg_temp_new_i64();
3781 neon_load_reg64(tmp64
, rd
);
3782 gen_st64(tmp64
, addr
, IS_USER(s
));
3784 tcg_gen_addi_i32(addr
, addr
, stride
);
3786 for (pass
= 0; pass
< 2; pass
++) {
3789 tmp
= gen_ld32(addr
, IS_USER(s
));
3790 neon_store_reg(rd
, pass
, tmp
);
3792 tmp
= neon_load_reg(rd
, pass
);
3793 gen_st32(tmp
, addr
, IS_USER(s
));
3795 tcg_gen_addi_i32(addr
, addr
, stride
);
3796 } else if (size
== 1) {
3798 tmp
= gen_ld16u(addr
, IS_USER(s
));
3799 tcg_gen_addi_i32(addr
, addr
, stride
);
3800 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3801 tcg_gen_addi_i32(addr
, addr
, stride
);
3802 gen_bfi(tmp
, tmp
, tmp2
, 16, 0xffff);
3804 neon_store_reg(rd
, pass
, tmp
);
3806 tmp
= neon_load_reg(rd
, pass
);
3808 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3809 gen_st16(tmp
, addr
, IS_USER(s
));
3810 tcg_gen_addi_i32(addr
, addr
, stride
);
3811 gen_st16(tmp2
, addr
, IS_USER(s
));
3812 tcg_gen_addi_i32(addr
, addr
, stride
);
3814 } else /* size == 0 */ {
3817 for (n
= 0; n
< 4; n
++) {
3818 tmp
= gen_ld8u(addr
, IS_USER(s
));
3819 tcg_gen_addi_i32(addr
, addr
, stride
);
3823 gen_bfi(tmp2
, tmp2
, tmp
, n
* 8, 0xff);
3827 neon_store_reg(rd
, pass
, tmp2
);
3829 tmp2
= neon_load_reg(rd
, pass
);
3830 for (n
= 0; n
< 4; n
++) {
3833 tcg_gen_mov_i32(tmp
, tmp2
);
3835 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3837 gen_st8(tmp
, addr
, IS_USER(s
));
3838 tcg_gen_addi_i32(addr
, addr
, stride
);
3849 size
= (insn
>> 10) & 3;
3851 /* Load single element to all lanes. */
3854 size
= (insn
>> 6) & 3;
3855 nregs
= ((insn
>> 8) & 3) + 1;
3856 stride
= (insn
& (1 << 5)) ? 2 : 1;
3857 load_reg_var(s
, addr
, rn
);
3858 for (reg
= 0; reg
< nregs
; reg
++) {
3861 tmp
= gen_ld8u(addr
, IS_USER(s
));
3862 gen_neon_dup_u8(tmp
, 0);
3865 tmp
= gen_ld16u(addr
, IS_USER(s
));
3866 gen_neon_dup_low16(tmp
);
3869 tmp
= gen_ld32(addr
, IS_USER(s
));
3873 default: /* Avoid compiler warnings. */
3876 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3878 tcg_gen_mov_i32(tmp2
, tmp
);
3879 neon_store_reg(rd
, 0, tmp2
);
3880 neon_store_reg(rd
, 1, tmp
);
3883 stride
= (1 << size
) * nregs
;
3885 /* Single element. */
3886 pass
= (insn
>> 7) & 1;
3889 shift
= ((insn
>> 5) & 3) * 8;
3893 shift
= ((insn
>> 6) & 1) * 16;
3894 stride
= (insn
& (1 << 5)) ? 2 : 1;
3898 stride
= (insn
& (1 << 6)) ? 2 : 1;
3903 nregs
= ((insn
>> 8) & 3) + 1;
3904 load_reg_var(s
, addr
, rn
);
3905 for (reg
= 0; reg
< nregs
; reg
++) {
3909 tmp
= gen_ld8u(addr
, IS_USER(s
));
3912 tmp
= gen_ld16u(addr
, IS_USER(s
));
3915 tmp
= gen_ld32(addr
, IS_USER(s
));
3917 default: /* Avoid compiler warnings. */
3921 tmp2
= neon_load_reg(rd
, pass
);
3922 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3925 neon_store_reg(rd
, pass
, tmp
);
3926 } else { /* Store */
3927 tmp
= neon_load_reg(rd
, pass
);
3929 tcg_gen_shri_i32(tmp
, tmp
, shift
);
3932 gen_st8(tmp
, addr
, IS_USER(s
));
3935 gen_st16(tmp
, addr
, IS_USER(s
));
3938 gen_st32(tmp
, addr
, IS_USER(s
));
3943 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3945 stride
= nregs
* (1 << size
);
3952 base
= load_reg(s
, rn
);
3954 tcg_gen_addi_i32(base
, base
, stride
);
3957 index
= load_reg(s
, rm
);
3958 tcg_gen_add_i32(base
, base
, index
);
3961 store_reg(s
, rn
, base
);
3966 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3967 static void gen_neon_bsl(TCGv dest
, TCGv t
, TCGv f
, TCGv c
)
3969 tcg_gen_and_i32(t
, t
, c
);
3970 tcg_gen_andc_i32(f
, f
, c
);
3971 tcg_gen_or_i32(dest
, t
, f
);
3974 static inline void gen_neon_narrow(int size
, TCGv dest
, TCGv_i64 src
)
3977 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
3978 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
3979 case 2: tcg_gen_trunc_i64_i32(dest
, src
); break;
3984 static inline void gen_neon_narrow_sats(int size
, TCGv dest
, TCGv_i64 src
)
3987 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
3988 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
3989 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
3994 static inline void gen_neon_narrow_satu(int size
, TCGv dest
, TCGv_i64 src
)
3997 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
3998 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
3999 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
4004 static inline void gen_neon_shift_narrow(int size
, TCGv var
, TCGv shift
,
4010 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4011 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4016 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4017 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4024 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4025 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4030 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4031 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
4038 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv src
, int size
, int u
)
4042 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
4043 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
4044 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
4049 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
4050 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
4051 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
4058 static inline void gen_neon_addl(int size
)
4061 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
4062 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
4063 case 2: tcg_gen_add_i64(CPU_V001
); break;
4068 static inline void gen_neon_subl(int size
)
4071 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
4072 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
4073 case 2: tcg_gen_sub_i64(CPU_V001
); break;
4078 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
4081 case 0: gen_helper_neon_negl_u16(var
, var
); break;
4082 case 1: gen_helper_neon_negl_u32(var
, var
); break;
4083 case 2: gen_helper_neon_negl_u64(var
, var
); break;
4088 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
4091 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
4092 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
4097 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv a
, TCGv b
, int size
, int u
)
4101 switch ((size
<< 1) | u
) {
4102 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
4103 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
4104 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
4105 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
4107 tmp
= gen_muls_i64_i32(a
, b
);
4108 tcg_gen_mov_i64(dest
, tmp
);
4111 tmp
= gen_mulu_i64_i32(a
, b
);
4112 tcg_gen_mov_i64(dest
, tmp
);
4118 /* Translate a NEON data processing instruction. Return nonzero if the
4119 instruction is invalid.
4120 We process data in a mixture of 32-bit and 64-bit chunks.
4121 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4123 static int disas_neon_data_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
4136 TCGv tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
4139 if (!vfp_enabled(env
))
4141 q
= (insn
& (1 << 6)) != 0;
4142 u
= (insn
>> 24) & 1;
4143 VFP_DREG_D(rd
, insn
);
4144 VFP_DREG_N(rn
, insn
);
4145 VFP_DREG_M(rm
, insn
);
4146 size
= (insn
>> 20) & 3;
4147 if ((insn
& (1 << 23)) == 0) {
4148 /* Three register same length. */
4149 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4150 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4151 || op
== 10 || op
== 11 || op
== 16)) {
4152 /* 64-bit element instructions. */
4153 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4154 neon_load_reg64(cpu_V0
, rn
+ pass
);
4155 neon_load_reg64(cpu_V1
, rm
+ pass
);
4159 gen_helper_neon_add_saturate_u64(CPU_V001
);
4161 gen_helper_neon_add_saturate_s64(CPU_V001
);
4166 gen_helper_neon_sub_saturate_u64(CPU_V001
);
4168 gen_helper_neon_sub_saturate_s64(CPU_V001
);
4173 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4175 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4180 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4183 gen_helper_neon_qshl_s64(cpu_V1
, cpu_env
,
4187 case 10: /* VRSHL */
4189 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4191 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4194 case 11: /* VQRSHL */
4196 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4199 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4205 tcg_gen_sub_i64(CPU_V001
);
4207 tcg_gen_add_i64(CPU_V001
);
4213 neon_store_reg64(cpu_V0
, rd
+ pass
);
4220 case 10: /* VRSHL */
4221 case 11: /* VQRSHL */
4224 /* Shift instruction operands are reversed. */
4231 case 20: /* VPMAX */
4232 case 21: /* VPMIN */
4233 case 23: /* VPADD */
4236 case 26: /* VPADD (float) */
4237 pairwise
= (u
&& size
< 2);
4239 case 30: /* VPMIN/VPMAX (float) */
4247 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4256 tmp
= neon_load_reg(rn
, n
);
4257 tmp2
= neon_load_reg(rn
, n
+ 1);
4259 tmp
= neon_load_reg(rm
, n
);
4260 tmp2
= neon_load_reg(rm
, n
+ 1);
4264 tmp
= neon_load_reg(rn
, pass
);
4265 tmp2
= neon_load_reg(rm
, pass
);
4269 GEN_NEON_INTEGER_OP(hadd
);
4272 GEN_NEON_INTEGER_OP_ENV(qadd
);
4274 case 2: /* VRHADD */
4275 GEN_NEON_INTEGER_OP(rhadd
);
4277 case 3: /* Logic ops. */
4278 switch ((u
<< 2) | size
) {
4280 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4283 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4286 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4289 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4292 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4295 tmp3
= neon_load_reg(rd
, pass
);
4296 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4300 tmp3
= neon_load_reg(rd
, pass
);
4301 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4305 tmp3
= neon_load_reg(rd
, pass
);
4306 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4312 GEN_NEON_INTEGER_OP(hsub
);
4315 GEN_NEON_INTEGER_OP_ENV(qsub
);
4318 GEN_NEON_INTEGER_OP(cgt
);
4321 GEN_NEON_INTEGER_OP(cge
);
4324 GEN_NEON_INTEGER_OP(shl
);
4327 GEN_NEON_INTEGER_OP_ENV(qshl
);
4329 case 10: /* VRSHL */
4330 GEN_NEON_INTEGER_OP(rshl
);
4332 case 11: /* VQRSHL */
4333 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4336 GEN_NEON_INTEGER_OP(max
);
4339 GEN_NEON_INTEGER_OP(min
);
4342 GEN_NEON_INTEGER_OP(abd
);
4345 GEN_NEON_INTEGER_OP(abd
);
4347 tmp2
= neon_load_reg(rd
, pass
);
4348 gen_neon_add(size
, tmp
, tmp2
);
4351 if (!u
) { /* VADD */
4352 if (gen_neon_add(size
, tmp
, tmp2
))
4356 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4357 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4358 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4364 if (!u
) { /* VTST */
4366 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4367 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4368 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4373 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4374 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4375 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4380 case 18: /* Multiply. */
4382 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4383 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4384 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4388 tmp2
= neon_load_reg(rd
, pass
);
4390 gen_neon_rsb(size
, tmp
, tmp2
);
4392 gen_neon_add(size
, tmp
, tmp2
);
4396 if (u
) { /* polynomial */
4397 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4398 } else { /* Integer */
4400 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4401 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4402 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4407 case 20: /* VPMAX */
4408 GEN_NEON_INTEGER_OP(pmax
);
4410 case 21: /* VPMIN */
4411 GEN_NEON_INTEGER_OP(pmin
);
4413 case 22: /* Hultiply high. */
4414 if (!u
) { /* VQDMULH */
4416 case 1: gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4417 case 2: gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
4420 } else { /* VQRDHMUL */
4422 case 1: gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4423 case 2: gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
4428 case 23: /* VPADD */
4432 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4433 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4434 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4438 case 26: /* Floating point arithnetic. */
4439 switch ((u
<< 2) | size
) {
4441 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4444 gen_helper_neon_sub_f32(tmp
, tmp
, tmp2
);
4447 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4450 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
);
4456 case 27: /* Float multiply. */
4457 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
4460 tmp2
= neon_load_reg(rd
, pass
);
4462 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4464 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
4468 case 28: /* Float compare. */
4470 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
4473 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
4475 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
4478 case 29: /* Float compare absolute. */
4482 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
);
4484 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
);
4486 case 30: /* Float min/max. */
4488 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
);
4490 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
);
4494 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4496 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4503 /* Save the result. For elementwise operations we can put it
4504 straight into the destination register. For pairwise operations
4505 we have to be careful to avoid clobbering the source operands. */
4506 if (pairwise
&& rd
== rm
) {
4507 neon_store_scratch(pass
, tmp
);
4509 neon_store_reg(rd
, pass
, tmp
);
4513 if (pairwise
&& rd
== rm
) {
4514 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4515 tmp
= neon_load_scratch(pass
);
4516 neon_store_reg(rd
, pass
, tmp
);
4519 /* End of 3 register same size operations. */
4520 } else if (insn
& (1 << 4)) {
4521 if ((insn
& 0x00380080) != 0) {
4522 /* Two registers and shift. */
4523 op
= (insn
>> 8) & 0xf;
4524 if (insn
& (1 << 7)) {
4529 while ((insn
& (1 << (size
+ 19))) == 0)
4532 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4533 /* To avoid excessive dumplication of ops we implement shift
4534 by immediate using the variable shift operations. */
4536 /* Shift by immediate:
4537 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4538 /* Right shifts are encoded as N - shift, where N is the
4539 element size in bits. */
4541 shift
= shift
- (1 << (size
+ 3));
4549 imm
= (uint8_t) shift
;
4554 imm
= (uint16_t) shift
;
4565 for (pass
= 0; pass
< count
; pass
++) {
4567 neon_load_reg64(cpu_V0
, rm
+ pass
);
4568 tcg_gen_movi_i64(cpu_V1
, imm
);
4573 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4575 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4580 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4582 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4587 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4589 case 5: /* VSHL, VSLI */
4590 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4594 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4596 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4598 case 7: /* VQSHLU */
4599 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4602 if (op
== 1 || op
== 3) {
4604 neon_load_reg64(cpu_V0
, rd
+ pass
);
4605 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4606 } else if (op
== 4 || (op
== 5 && u
)) {
4608 cpu_abort(env
, "VS[LR]I.64 not implemented");
4610 neon_store_reg64(cpu_V0
, rd
+ pass
);
4611 } else { /* size < 3 */
4612 /* Operands in T0 and T1. */
4613 tmp
= neon_load_reg(rm
, pass
);
4615 tcg_gen_movi_i32(tmp2
, imm
);
4619 GEN_NEON_INTEGER_OP(shl
);
4623 GEN_NEON_INTEGER_OP(rshl
);
4628 GEN_NEON_INTEGER_OP(shl
);
4630 case 5: /* VSHL, VSLI */
4632 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
4633 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
4634 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
4639 GEN_NEON_INTEGER_OP_ENV(qshl
);
4641 case 7: /* VQSHLU */
4643 case 0: gen_helper_neon_qshl_u8(tmp
, cpu_env
, tmp
, tmp2
); break;
4644 case 1: gen_helper_neon_qshl_u16(tmp
, cpu_env
, tmp
, tmp2
); break;
4645 case 2: gen_helper_neon_qshl_u32(tmp
, cpu_env
, tmp
, tmp2
); break;
4652 if (op
== 1 || op
== 3) {
4654 tmp2
= neon_load_reg(rd
, pass
);
4655 gen_neon_add(size
, tmp2
, tmp
);
4657 } else if (op
== 4 || (op
== 5 && u
)) {
4662 mask
= 0xff >> -shift
;
4664 mask
= (uint8_t)(0xff << shift
);
4670 mask
= 0xffff >> -shift
;
4672 mask
= (uint16_t)(0xffff << shift
);
4676 if (shift
< -31 || shift
> 31) {
4680 mask
= 0xffffffffu
>> -shift
;
4682 mask
= 0xffffffffu
<< shift
;
4688 tmp2
= neon_load_reg(rd
, pass
);
4689 tcg_gen_andi_i32(tmp
, tmp
, mask
);
4690 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
4691 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4694 neon_store_reg(rd
, pass
, tmp
);
4697 } else if (op
< 10) {
4698 /* Shift by immediate and narrow:
4699 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4700 shift
= shift
- (1 << (size
+ 3));
4704 imm
= (uint16_t)shift
;
4706 tmp2
= tcg_const_i32(imm
);
4707 TCGV_UNUSED_I64(tmp64
);
4710 imm
= (uint32_t)shift
;
4711 tmp2
= tcg_const_i32(imm
);
4712 TCGV_UNUSED_I64(tmp64
);
4715 tmp64
= tcg_const_i64(shift
);
4722 for (pass
= 0; pass
< 2; pass
++) {
4724 neon_load_reg64(cpu_V0
, rm
+ pass
);
4727 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, tmp64
);
4729 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, tmp64
);
4732 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, tmp64
);
4734 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, tmp64
);
4737 tmp
= neon_load_reg(rm
+ pass
, 0);
4738 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
, u
);
4739 tmp3
= neon_load_reg(rm
+ pass
, 1);
4740 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
, u
);
4741 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4746 if (op
== 8 && !u
) {
4747 gen_neon_narrow(size
- 1, tmp
, cpu_V0
);
4750 gen_neon_narrow_sats(size
- 1, tmp
, cpu_V0
);
4752 gen_neon_narrow_satu(size
- 1, tmp
, cpu_V0
);
4754 neon_store_reg(rd
, pass
, tmp
);
4757 tcg_temp_free_i64(tmp64
);
4761 } else if (op
== 10) {
4765 tmp
= neon_load_reg(rm
, 0);
4766 tmp2
= neon_load_reg(rm
, 1);
4767 for (pass
= 0; pass
< 2; pass
++) {
4771 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4774 /* The shift is less than the width of the source
4775 type, so we can just shift the whole register. */
4776 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
4777 if (size
< 2 || !u
) {
4780 imm
= (0xffu
>> (8 - shift
));
4783 imm
= 0xffff >> (16 - shift
);
4785 imm64
= imm
| (((uint64_t)imm
) << 32);
4786 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, imm64
);
4789 neon_store_reg64(cpu_V0
, rd
+ pass
);
4791 } else if (op
== 15 || op
== 16) {
4792 /* VCVT fixed-point. */
4793 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4794 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4797 gen_vfp_ulto(0, shift
);
4799 gen_vfp_slto(0, shift
);
4802 gen_vfp_toul(0, shift
);
4804 gen_vfp_tosl(0, shift
);
4806 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4811 } else { /* (insn & 0x00380080) == 0 */
4814 op
= (insn
>> 8) & 0xf;
4815 /* One register and immediate. */
4816 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4817 invert
= (insn
& (1 << 5)) != 0;
4835 imm
= (imm
<< 8) | (imm
<< 24);
4838 imm
= (imm
< 8) | 0xff;
4841 imm
= (imm
<< 16) | 0xffff;
4844 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
4849 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
4850 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
4856 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4857 if (op
& 1 && op
< 12) {
4858 tmp
= neon_load_reg(rd
, pass
);
4860 /* The immediate value has already been inverted, so
4862 tcg_gen_andi_i32(tmp
, tmp
, imm
);
4864 tcg_gen_ori_i32(tmp
, tmp
, imm
);
4869 if (op
== 14 && invert
) {
4872 for (n
= 0; n
< 4; n
++) {
4873 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
4874 val
|= 0xff << (n
* 8);
4876 tcg_gen_movi_i32(tmp
, val
);
4878 tcg_gen_movi_i32(tmp
, imm
);
4881 neon_store_reg(rd
, pass
, tmp
);
4884 } else { /* (insn & 0x00800010 == 0x00800000) */
4886 op
= (insn
>> 8) & 0xf;
4887 if ((insn
& (1 << 6)) == 0) {
4888 /* Three registers of different lengths. */
4892 /* prewiden, src1_wide, src2_wide */
4893 static const int neon_3reg_wide
[16][3] = {
4894 {1, 0, 0}, /* VADDL */
4895 {1, 1, 0}, /* VADDW */
4896 {1, 0, 0}, /* VSUBL */
4897 {1, 1, 0}, /* VSUBW */
4898 {0, 1, 1}, /* VADDHN */
4899 {0, 0, 0}, /* VABAL */
4900 {0, 1, 1}, /* VSUBHN */
4901 {0, 0, 0}, /* VABDL */
4902 {0, 0, 0}, /* VMLAL */
4903 {0, 0, 0}, /* VQDMLAL */
4904 {0, 0, 0}, /* VMLSL */
4905 {0, 0, 0}, /* VQDMLSL */
4906 {0, 0, 0}, /* Integer VMULL */
4907 {0, 0, 0}, /* VQDMULL */
4908 {0, 0, 0} /* Polynomial VMULL */
4911 prewiden
= neon_3reg_wide
[op
][0];
4912 src1_wide
= neon_3reg_wide
[op
][1];
4913 src2_wide
= neon_3reg_wide
[op
][2];
4915 if (size
== 0 && (op
== 9 || op
== 11 || op
== 13))
4918 /* Avoid overlapping operands. Wide source operands are
4919 always aligned so will never overlap with wide
4920 destinations in problematic ways. */
4921 if (rd
== rm
&& !src2_wide
) {
4922 tmp
= neon_load_reg(rm
, 1);
4923 neon_store_scratch(2, tmp
);
4924 } else if (rd
== rn
&& !src1_wide
) {
4925 tmp
= neon_load_reg(rn
, 1);
4926 neon_store_scratch(2, tmp
);
4929 for (pass
= 0; pass
< 2; pass
++) {
4931 neon_load_reg64(cpu_V0
, rn
+ pass
);
4934 if (pass
== 1 && rd
== rn
) {
4935 tmp
= neon_load_scratch(2);
4937 tmp
= neon_load_reg(rn
, pass
);
4940 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4944 neon_load_reg64(cpu_V1
, rm
+ pass
);
4947 if (pass
== 1 && rd
== rm
) {
4948 tmp2
= neon_load_scratch(2);
4950 tmp2
= neon_load_reg(rm
, pass
);
4953 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
4957 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4958 gen_neon_addl(size
);
4960 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
4961 gen_neon_subl(size
);
4963 case 5: case 7: /* VABAL, VABDL */
4964 switch ((size
<< 1) | u
) {
4966 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
4969 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
4972 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
4975 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
4978 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
4981 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
4988 case 8: case 9: case 10: case 11: case 12: case 13:
4989 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4990 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
4994 case 14: /* Polynomial VMULL */
4995 cpu_abort(env
, "Polynomial VMULL not implemented");
4997 default: /* 15 is RESERVED. */
5000 if (op
== 5 || op
== 13 || (op
>= 8 && op
<= 11)) {
5002 if (op
== 10 || op
== 11) {
5003 gen_neon_negl(cpu_V0
, size
);
5007 neon_load_reg64(cpu_V1
, rd
+ pass
);
5011 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5012 gen_neon_addl(size
);
5014 case 9: case 11: /* VQDMLAL, VQDMLSL */
5015 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5016 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5019 case 13: /* VQDMULL */
5020 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5025 neon_store_reg64(cpu_V0
, rd
+ pass
);
5026 } else if (op
== 4 || op
== 6) {
5027 /* Narrowing operation. */
5032 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5035 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5038 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5039 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5046 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5049 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5052 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5053 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5054 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5062 neon_store_reg(rd
, 0, tmp3
);
5063 neon_store_reg(rd
, 1, tmp
);
5066 /* Write back the result. */
5067 neon_store_reg64(cpu_V0
, rd
+ pass
);
5071 /* Two registers and a scalar. */
5073 case 0: /* Integer VMLA scalar */
5074 case 1: /* Float VMLA scalar */
5075 case 4: /* Integer VMLS scalar */
5076 case 5: /* Floating point VMLS scalar */
5077 case 8: /* Integer VMUL scalar */
5078 case 9: /* Floating point VMUL scalar */
5079 case 12: /* VQDMULH scalar */
5080 case 13: /* VQRDMULH scalar */
5081 tmp
= neon_get_scalar(size
, rm
);
5082 neon_store_scratch(0, tmp
);
5083 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5084 tmp
= neon_load_scratch(0);
5085 tmp2
= neon_load_reg(rn
, pass
);
5088 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5090 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5092 } else if (op
== 13) {
5094 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5096 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5098 } else if (op
& 1) {
5099 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
5102 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5103 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5104 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5111 tmp2
= neon_load_reg(rd
, pass
);
5114 gen_neon_add(size
, tmp
, tmp2
);
5117 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
5120 gen_neon_rsb(size
, tmp
, tmp2
);
5123 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
5130 neon_store_reg(rd
, pass
, tmp
);
5133 case 2: /* VMLAL sclar */
5134 case 3: /* VQDMLAL scalar */
5135 case 6: /* VMLSL scalar */
5136 case 7: /* VQDMLSL scalar */
5137 case 10: /* VMULL scalar */
5138 case 11: /* VQDMULL scalar */
5139 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5142 tmp2
= neon_get_scalar(size
, rm
);
5143 tmp3
= neon_load_reg(rn
, 1);
5145 for (pass
= 0; pass
< 2; pass
++) {
5147 tmp
= neon_load_reg(rn
, 0);
5151 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5153 if (op
== 6 || op
== 7) {
5154 gen_neon_negl(cpu_V0
, size
);
5157 neon_load_reg64(cpu_V1
, rd
+ pass
);
5161 gen_neon_addl(size
);
5164 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5165 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5171 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5176 neon_store_reg64(cpu_V0
, rd
+ pass
);
5182 default: /* 14 and 15 are RESERVED */
5186 } else { /* size == 3 */
5189 imm
= (insn
>> 8) & 0xf;
5196 neon_load_reg64(cpu_V0
, rn
);
5198 neon_load_reg64(cpu_V1
, rn
+ 1);
5200 } else if (imm
== 8) {
5201 neon_load_reg64(cpu_V0
, rn
+ 1);
5203 neon_load_reg64(cpu_V1
, rm
);
5206 tmp64
= tcg_temp_new_i64();
5208 neon_load_reg64(cpu_V0
, rn
);
5209 neon_load_reg64(tmp64
, rn
+ 1);
5211 neon_load_reg64(cpu_V0
, rn
+ 1);
5212 neon_load_reg64(tmp64
, rm
);
5214 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5215 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5216 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5218 neon_load_reg64(cpu_V1
, rm
);
5220 neon_load_reg64(cpu_V1
, rm
+ 1);
5223 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5224 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5225 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5226 tcg_temp_free_i64(tmp64
);
5229 neon_load_reg64(cpu_V0
, rn
);
5230 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5231 neon_load_reg64(cpu_V1
, rm
);
5232 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5233 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5235 neon_store_reg64(cpu_V0
, rd
);
5237 neon_store_reg64(cpu_V1
, rd
+ 1);
5239 } else if ((insn
& (1 << 11)) == 0) {
5240 /* Two register misc. */
5241 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5242 size
= (insn
>> 18) & 3;
5244 case 0: /* VREV64 */
5247 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5248 tmp
= neon_load_reg(rm
, pass
* 2);
5249 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5251 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5252 case 1: gen_swap_half(tmp
); break;
5253 case 2: /* no-op */ break;
5256 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5258 neon_store_reg(rd
, pass
* 2, tmp2
);
5261 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5262 case 1: gen_swap_half(tmp2
); break;
5265 neon_store_reg(rd
, pass
* 2, tmp2
);
5269 case 4: case 5: /* VPADDL */
5270 case 12: case 13: /* VPADAL */
5273 for (pass
= 0; pass
< q
+ 1; pass
++) {
5274 tmp
= neon_load_reg(rm
, pass
* 2);
5275 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5276 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5277 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5279 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5280 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5281 case 2: tcg_gen_add_i64(CPU_V001
); break;
5286 neon_load_reg64(cpu_V1
, rd
+ pass
);
5287 gen_neon_addl(size
);
5289 neon_store_reg64(cpu_V0
, rd
+ pass
);
5294 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5295 tmp
= neon_load_reg(rm
, n
);
5296 tmp2
= neon_load_reg(rd
, n
+ 1);
5297 neon_store_reg(rm
, n
, tmp2
);
5298 neon_store_reg(rd
, n
+ 1, tmp
);
5306 Rd A3 A2 A1 A0 B2 B0 A2 A0
5307 Rm B3 B2 B1 B0 B3 B1 A3 A1
5311 gen_neon_unzip(rd
, q
, 0, size
);
5312 gen_neon_unzip(rm
, q
, 4, size
);
5314 static int unzip_order_q
[8] =
5315 {0, 2, 4, 6, 1, 3, 5, 7};
5316 for (n
= 0; n
< 8; n
++) {
5317 int reg
= (n
< 4) ? rd
: rm
;
5318 tmp
= neon_load_scratch(unzip_order_q
[n
]);
5319 neon_store_reg(reg
, n
% 4, tmp
);
5322 static int unzip_order
[4] =
5324 for (n
= 0; n
< 4; n
++) {
5325 int reg
= (n
< 2) ? rd
: rm
;
5326 tmp
= neon_load_scratch(unzip_order
[n
]);
5327 neon_store_reg(reg
, n
% 2, tmp
);
5333 Rd A3 A2 A1 A0 B1 A1 B0 A0
5334 Rm B3 B2 B1 B0 B3 A3 B2 A2
5338 count
= (q
? 4 : 2);
5339 for (n
= 0; n
< count
; n
++) {
5340 tmp
= neon_load_reg(rd
, n
);
5341 tmp2
= neon_load_reg(rd
, n
);
5343 case 0: gen_neon_zip_u8(tmp
, tmp2
); break;
5344 case 1: gen_neon_zip_u16(tmp
, tmp2
); break;
5345 case 2: /* no-op */; break;
5348 neon_store_scratch(n
* 2, tmp
);
5349 neon_store_scratch(n
* 2 + 1, tmp2
);
5351 for (n
= 0; n
< count
* 2; n
++) {
5352 int reg
= (n
< count
) ? rd
: rm
;
5353 tmp
= neon_load_scratch(n
);
5354 neon_store_reg(reg
, n
% count
, tmp
);
5357 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5361 for (pass
= 0; pass
< 2; pass
++) {
5362 neon_load_reg64(cpu_V0
, rm
+ pass
);
5364 if (op
== 36 && q
== 0) {
5365 gen_neon_narrow(size
, tmp
, cpu_V0
);
5367 gen_neon_narrow_satu(size
, tmp
, cpu_V0
);
5369 gen_neon_narrow_sats(size
, tmp
, cpu_V0
);
5374 neon_store_reg(rd
, 0, tmp2
);
5375 neon_store_reg(rd
, 1, tmp
);
5379 case 38: /* VSHLL */
5382 tmp
= neon_load_reg(rm
, 0);
5383 tmp2
= neon_load_reg(rm
, 1);
5384 for (pass
= 0; pass
< 2; pass
++) {
5387 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5388 neon_store_reg64(cpu_V0
, rd
+ pass
);
5391 case 44: /* VCVT.F16.F32 */
5392 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5396 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5397 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5398 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5399 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5400 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5401 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5402 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5403 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5404 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5405 neon_store_reg(rd
, 0, tmp2
);
5407 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5408 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5409 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5410 neon_store_reg(rd
, 1, tmp2
);
5413 case 46: /* VCVT.F32.F16 */
5414 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5417 tmp
= neon_load_reg(rm
, 0);
5418 tmp2
= neon_load_reg(rm
, 1);
5419 tcg_gen_ext16u_i32(tmp3
, tmp
);
5420 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5421 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5422 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5423 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5424 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5426 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5427 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5428 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5429 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5430 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5431 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5437 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5438 if (op
== 30 || op
== 31 || op
>= 58) {
5439 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5440 neon_reg_offset(rm
, pass
));
5443 tmp
= neon_load_reg(rm
, pass
);
5446 case 1: /* VREV32 */
5448 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5449 case 1: gen_swap_half(tmp
); break;
5453 case 2: /* VREV16 */
5460 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5461 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5462 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5468 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5469 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5470 case 2: gen_helper_clz(tmp
, tmp
); break;
5477 gen_helper_neon_cnt_u8(tmp
, tmp
);
5482 tcg_gen_not_i32(tmp
, tmp
);
5484 case 14: /* VQABS */
5486 case 0: gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
); break;
5487 case 1: gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
); break;
5488 case 2: gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
); break;
5492 case 15: /* VQNEG */
5494 case 0: gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
); break;
5495 case 1: gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
); break;
5496 case 2: gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
); break;
5500 case 16: case 19: /* VCGT #0, VCLE #0 */
5501 tmp2
= tcg_const_i32(0);
5503 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5504 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5505 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5508 tcg_temp_free(tmp2
);
5510 tcg_gen_not_i32(tmp
, tmp
);
5512 case 17: case 20: /* VCGE #0, VCLT #0 */
5513 tmp2
= tcg_const_i32(0);
5515 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5516 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5517 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5520 tcg_temp_free(tmp2
);
5522 tcg_gen_not_i32(tmp
, tmp
);
5524 case 18: /* VCEQ #0 */
5525 tmp2
= tcg_const_i32(0);
5527 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5528 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5529 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5532 tcg_temp_free(tmp2
);
5536 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5537 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5538 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5545 tmp2
= tcg_const_i32(0);
5546 gen_neon_rsb(size
, tmp
, tmp2
);
5547 tcg_temp_free(tmp2
);
5549 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5550 tmp2 = tcg_const_i32(0);
5551 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5552 tcg_temp_free(tmp2);
5554 tcg_gen_not_i32(tmp, tmp);
5556 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5557 tmp2 = tcg_const_i32(0);
5558 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5559 tcg_temp_free(tmp2);
5561 tcg_gen_not_i32(tmp, tmp);
5563 case 26: /* Float VCEQ #0 */
5564 tmp2 = tcg_const_i32(0);
5565 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5566 tcg_temp_free(tmp2);
5568 case 30: /* Float VABS */
5571 case 31: /* Float VNEG */
5575 tmp2 = neon_load_reg(rd, pass);
5576 neon_store_reg(rm, pass, tmp2);
5579 tmp2 = neon_load_reg(rd, pass);
5581 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5582 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5586 neon_store_reg(rm, pass, tmp2);
5588 case 56: /* Integer VRECPE */
5589 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5591 case 57: /* Integer VRSQRTE */
5592 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5594 case 58: /* Float VRECPE */
5595 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5597 case 59: /* Float VRSQRTE */
5598 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5600 case 60: /* VCVT.F32.S32 */
5603 case 61: /* VCVT.F32.U32 */
5606 case 62: /* VCVT.S32.F32 */
5609 case 63: /* VCVT.U32.F32 */
5613 /* Reserved: 21, 29, 39-56 */
5616 if (op == 30 || op == 31 || op >= 58) {
5617 tcg_gen_st_f32(cpu_F0s, cpu_env,
5618 neon_reg_offset(rd, pass));
5620 neon_store_reg(rd, pass, tmp);
5625 } else if ((insn & (1 << 10)) == 0) {
5627 n = ((insn >> 5) & 0x18) + 8;
5628 if (insn & (1 << 6)) {
5629 tmp = neon_load_reg(rd, 0);
5632 tcg_gen_movi_i32(tmp, 0);
5634 tmp2 = neon_load_reg(rm, 0);
5635 tmp4 = tcg_const_i32(rn);
5636 tmp5 = tcg_const_i32(n);
5637 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5639 if (insn & (1 << 6)) {
5640 tmp = neon_load_reg(rd, 1);
5643 tcg_gen_movi_i32(tmp, 0);
5645 tmp3 = neon_load_reg(rm, 1);
5646 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5647 tcg_temp_free_i32(tmp5);
5648 tcg_temp_free_i32(tmp4);
5649 neon_store_reg(rd, 0, tmp2);
5650 neon_store_reg(rd, 1, tmp3);
5652 } else if ((insn & 0x380) == 0) {
5654 if (insn & (1 << 19)) {
5655 tmp = neon_load_reg(rm, 1);
5657 tmp = neon_load_reg(rm, 0);
5659 if (insn & (1 << 16)) {
5660 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5661 } else if (insn & (1 << 17)) {
5662 if ((insn >> 18) & 1)
5663 gen_neon_dup_high16(tmp);
5665 gen_neon_dup_low16(tmp);
5667 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5669 tcg_gen_mov_i32(tmp2, tmp);
5670 neon_store_reg(rd, pass, tmp2);
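/* cp14 access helpers: the only registers handled here are the ThumbEE
   TEECR and TEEHBR; any other encoding is reported as unknown on stderr. */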
5681 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5683 int crn = (insn >> 16) & 0xf;
5684 int crm = insn & 0xf;
5685 int op1 = (insn >> 21) & 7;
5686 int op2 = (insn >> 5) & 7;
5687 int rt = (insn >> 12) & 0xf;
5690 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5691 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5695 tmp = load_cpu_field(teecr);
5696 store_reg(s, rt, tmp);
5699 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5701 if (IS_USER(s) && (env->teecr & 1))
5703 tmp = load_cpu_field(teehbr);
5704 store_reg(s, rt, tmp);
5708 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5709 op1, crn, crm, op2);
5713 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5715 int crn = (insn >> 16) & 0xf;
5716 int crm = insn & 0xf;
5717 int op1 = (insn >> 21) & 7;
5718 int op2 = (insn >> 5) & 7;
5719 int rt = (insn >> 12) & 0xf;
5722 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5723 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5727 tmp = load_reg(s, rt);
5728 gen_helper_set_teecr(cpu_env, tmp);
5732 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5734 if (IS_USER(s) && (env->teecr & 1))
5736 tmp = load_reg(s, rt);
5737 store_cpu_field(tmp, teehbr);
5741 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5742 op1, crn, crm, op2);
5746 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5750 cpnum = (insn >> 8) & 0xf;
5751 if (arm_feature(env, ARM_FEATURE_XSCALE)
5752 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5758 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5759 return disas_iwmmxt_insn(env, s, insn);
5760 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5761 return disas_dsp_insn(env, s, insn);
5766 return disas_vfp_insn (env, s, insn);
5768 /* Coprocessors 7-15 are architecturally reserved by ARM.
5769 Unfortunately Intel decided to ignore this. */
5770 if (arm_feature(env, ARM_FEATURE_XSCALE))
5772 if (insn & (1 << 20))
5773 return disas_cp14_read(env, s, insn);
5775 return disas_cp14_write(env, s, insn);
5777 return disas_cp15_insn (env, s, insn);
5780 /* Unknown coprocessor. See if the board has hooked it. */
5781 return disas_cp_insn (env, s, insn);
5786 /* Store a 64-bit value to a register pair. Clobbers val. */
5787 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5791 tcg_gen_trunc_i64_i32(tmp, val);
5792 store_reg(s, rlow, tmp);
5794 tcg_gen_shri_i64(val, val, 32);
5795 tcg_gen_trunc_i64_i32(tmp, val);
5796 store_reg(s, rhigh, tmp);
5799 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5800 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5805 /* Load value and extend to 64 bits. */
5806 tmp = tcg_temp_new_i64();
5807 tmp2 = load_reg(s, rlow);
5808 tcg_gen_extu_i32_i64(tmp, tmp2);
5810 tcg_gen_add_i64(val, val, tmp);
5811 tcg_temp_free_i64(tmp);
5814 /* load and add a 64-bit value from a register pair. */
5815 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5821 /* Load 64-bit value rd:rn. */
5822 tmpl = load_reg(s, rlow);
5823 tmph = load_reg(s, rhigh);
5824 tmp = tcg_temp_new_i64();
5825 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5828 tcg_gen_add_i64(val, val, tmp);
5829 tcg_temp_free_i64(tmp);
5832 /* Set N and Z flags from a 64-bit value. */
5833 static void gen_logicq_cc(TCGv_i64 val)
5835 TCGv tmp = new_tmp();
5836 gen_helper_logicq_cc(tmp, val);
5841 /* Load/Store exclusive instructions are implemented by remembering
5842 the value/address loaded, and seeing if these are the same
5843 when the store is performed. This should be sufficient to implement
5844 the architecturally mandated semantics, and avoids having to monitor regular stores.
5847 In system emulation mode only one CPU will be running at once, so
5848 this sequence is effectively atomic. In user emulation mode we
5849 throw an exception and handle the atomic operation elsewhere. */
5850 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5851 TCGv addr, int size)
5857 tmp = gen_ld8u(addr, IS_USER(s));
5860 tmp = gen_ld16u(addr, IS_USER(s));
5864 tmp = gen_ld32(addr, IS_USER(s));
5869 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5870 store_reg(s, rt, tmp);
5872 tcg_gen_addi_i32(addr, addr, 4);
5873 tmp = gen_ld32(addr, IS_USER(s));
5874 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5875 store_reg(s, rt2, tmp);
5877 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5880 static void gen_clrex(DisasContext *s)
5882 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
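/* User-emulation variant of store-exclusive: record the address and the
   packed operands, then raise EXCP_STREX so the actual compare-and-store is
   performed by the exception handler rather than inline in generated code. */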
5885 #ifdef CONFIG_USER_ONLY
5886 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5887 TCGv addr, int size)
5889 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5890 tcg_gen_movi_i32(cpu_exclusive_info,
5891 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5892 gen_set_condexec(s);
5893 gen_set_pc_im(s->pc - 4);
5894 gen_exception(EXCP_STREX);
5895 s->is_jmp = DISAS_JUMP;
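/* System-emulation variant: only one CPU runs at a time, so the exclusive
   check can be generated inline as a compare-and-branch sequence. */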
5898 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5899 TCGv addr, int size)
5905 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5911 fail_label = gen_new_label();
5912 done_label = gen_new_label();
5913 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5916 tmp = gen_ld8u(addr, IS_USER(s));
5919 tmp = gen_ld16u(addr, IS_USER(s));
5923 tmp = gen_ld32(addr, IS_USER(s));
5928 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5931 TCGv tmp2 = new_tmp();
5932 tcg_gen_addi_i32(tmp2, addr, 4);
5933 tmp = gen_ld32(addr, IS_USER(s));
5935 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
5938 tmp = load_reg(s, rt);
5941 gen_st8(tmp, addr, IS_USER(s));
5944 gen_st16(tmp, addr, IS_USER(s));
5948 gen_st32(tmp, addr, IS_USER(s));
5954 tcg_gen_addi_i32(addr, addr, 4);
5955 tmp = load_reg(s, rt2);
5956 gen_st32(tmp, addr, IS_USER(s));
5958 tcg_gen_movi_i32(cpu_R[rd], 0);
5959 tcg_gen_br(done_label);
5960 gen_set_label(fail_label);
5961 tcg_gen_movi_i32(cpu_R[rd], 1);
5962 gen_set_label(done_label);
5963 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
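/* Decode and translate a single ARM (32-bit, non-Thumb) instruction at s->pc. */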
5967 static void disas_arm_insn(CPUState * env, DisasContext *s)
5969 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5976 insn = ldl_code(s->pc);
5979 /* M variants do not implement ARM mode. */
5984 /* Unconditional instructions. */
5985 if (((insn
>> 25) & 7) == 1) {
5986 /* NEON Data processing. */
5987 if (!arm_feature(env
, ARM_FEATURE_NEON
))
5990 if (disas_neon_data_insn(env
, s
, insn
))
5994 if ((insn
& 0x0f100000) == 0x04000000) {
5995 /* NEON load/store. */
5996 if (!arm_feature(env
, ARM_FEATURE_NEON
))
5999 if (disas_neon_ls_insn(env
, s
, insn
))
6003 if ((insn
& 0x0d70f000) == 0x0550f000)
6005 else if ((insn
& 0x0ffffdff) == 0x01010000) {
6008 if (insn
& (1 << 9)) {
6009 /* BE8 mode not implemented. */
6013 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6014 switch ((insn
>> 4) & 0xf) {
6023 /* We don't emulate caches so these are a no-op. */
6028 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6034 op1
= (insn
& 0x1f);
6035 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
6036 addr
= load_reg(s
, 13);
6039 tmp
= tcg_const_i32(op1
);
6040 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6041 tcg_temp_free_i32(tmp
);
6043 i
= (insn
>> 23) & 3;
6045 case 0: offset
= -4; break; /* DA */
6046 case 1: offset
= 0; break; /* IA */
6047 case 2: offset
= -8; break; /* DB */
6048 case 3: offset
= 4; break; /* IB */
6052 tcg_gen_addi_i32(addr
, addr
, offset
);
6053 tmp
= load_reg(s
, 14);
6054 gen_st32(tmp
, addr
, 0);
6055 tmp
= load_cpu_field(spsr
);
6056 tcg_gen_addi_i32(addr
, addr
, 4);
6057 gen_st32(tmp
, addr
, 0);
6058 if (insn
& (1 << 21)) {
6059 /* Base writeback. */
6061 case 0: offset
= -8; break;
6062 case 1: offset
= 4; break;
6063 case 2: offset
= -4; break;
6064 case 3: offset
= 0; break;
6068 tcg_gen_addi_i32(addr
, addr
, offset
);
6069 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
6070 store_reg(s
, 13, addr
);
6072 tmp
= tcg_const_i32(op1
);
6073 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6074 tcg_temp_free_i32(tmp
);
6080 } else if ((insn
& 0x0e5fffe0) == 0x081d0a00) {
6086 rn
= (insn
>> 16) & 0xf;
6087 addr
= load_reg(s
, rn
);
6088 i
= (insn
>> 23) & 3;
6090 case 0: offset
= -4; break; /* DA */
6091 case 1: offset
= 0; break; /* IA */
6092 case 2: offset
= -8; break; /* DB */
6093 case 3: offset
= 4; break; /* IB */
6097 tcg_gen_addi_i32(addr
, addr
, offset
);
6098 /* Load PC into tmp and CPSR into tmp2. */
6099 tmp
= gen_ld32(addr
, 0);
6100 tcg_gen_addi_i32(addr
, addr
, 4);
6101 tmp2
= gen_ld32(addr
, 0);
6102 if (insn
& (1 << 21)) {
6103 /* Base writeback. */
6105 case 0: offset
= -8; break;
6106 case 1: offset
= 4; break;
6107 case 2: offset
= -4; break;
6108 case 3: offset
= 0; break;
6112 tcg_gen_addi_i32(addr
, addr
, offset
);
6113 store_reg(s
, rn
, addr
);
6117 gen_rfe(s
, tmp
, tmp2
);
6119 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6120 /* branch link and change to thumb (blx <offset>) */
6123 val
= (uint32_t)s
->pc
;
6125 tcg_gen_movi_i32(tmp
, val
);
6126 store_reg(s
, 14, tmp
);
6127 /* Sign-extend the 24-bit offset */
6128 offset
= (((int32_t)insn
) << 8) >> 8;
6129 /* offset * 4 + bit24 * 2 + (thumb bit) */
6130 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6131 /* pipeline offset */
6135 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6136 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6137 /* iWMMXt register transfer. */
6138 if (env
->cp15
.c15_cpar
& (1 << 1))
6139 if (!disas_iwmmxt_insn(env
, s
, insn
))
6142 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6143 /* Coprocessor double register transfer. */
6144 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6145 /* Additional coprocessor register transfer. */
6146 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6149 /* cps (privileged) */
6153 if (insn
& (1 << 19)) {
6154 if (insn
& (1 << 8))
6156 if (insn
& (1 << 7))
6158 if (insn
& (1 << 6))
6160 if (insn
& (1 << 18))
6163 if (insn
& (1 << 17)) {
6165 val
|= (insn
& 0x1f);
6168 gen_set_psr_im(s
, mask
, 0, val
);
6175 /* if not always execute, we generate a conditional jump to
6177 s
->condlabel
= gen_new_label();
6178 gen_test_cc(cond
^ 1, s
->condlabel
);
6181 if ((insn
& 0x0f900000) == 0x03000000) {
6182 if ((insn
& (1 << 21)) == 0) {
6184 rd
= (insn
>> 12) & 0xf;
6185 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6186 if ((insn
& (1 << 22)) == 0) {
6189 tcg_gen_movi_i32(tmp
, val
);
6192 tmp
= load_reg(s
, rd
);
6193 tcg_gen_ext16u_i32(tmp
, tmp
);
6194 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6196 store_reg(s
, rd
, tmp
);
6198 if (((insn
>> 12) & 0xf) != 0xf)
6200 if (((insn
>> 16) & 0xf) == 0) {
6201 gen_nop_hint(s
, insn
& 0xff);
6203 /* CPSR = immediate */
6205 shift
= ((insn
>> 8) & 0xf) * 2;
6207 val
= (val
>> shift
) | (val
<< (32 - shift
));
6208 i
= ((insn
& (1 << 22)) != 0);
6209 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6213 } else if ((insn
& 0x0f900000) == 0x01000000
6214 && (insn
& 0x00000090) != 0x00000090) {
6215 /* miscellaneous instructions */
6216 op1
= (insn
>> 21) & 3;
6217 sh
= (insn
>> 4) & 0xf;
6220 case 0x0: /* move program status register */
6223 tmp
= load_reg(s
, rm
);
6224 i
= ((op1
& 2) != 0);
6225 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6229 rd
= (insn
>> 12) & 0xf;
6233 tmp
= load_cpu_field(spsr
);
6236 gen_helper_cpsr_read(tmp
);
6238 store_reg(s
, rd
, tmp
);
6243 /* branch/exchange thumb (bx). */
6244 tmp
= load_reg(s
, rm
);
6246 } else if (op1
== 3) {
6248 rd
= (insn
>> 12) & 0xf;
6249 tmp
= load_reg(s
, rm
);
6250 gen_helper_clz(tmp
, tmp
);
6251 store_reg(s
, rd
, tmp
);
6259 /* Trivial implementation equivalent to bx. */
6260 tmp
= load_reg(s
, rm
);
6270 /* branch link/exchange thumb (blx) */
6271 tmp
= load_reg(s
, rm
);
6273 tcg_gen_movi_i32(tmp2
, s
->pc
);
6274 store_reg(s
, 14, tmp2
);
6277 case 0x5: /* saturating add/subtract */
6278 rd
= (insn
>> 12) & 0xf;
6279 rn
= (insn
>> 16) & 0xf;
6280 tmp
= load_reg(s
, rm
);
6281 tmp2
= load_reg(s
, rn
);
6283 gen_helper_double_saturate(tmp2
, tmp2
);
6285 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6287 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6289 store_reg(s
, rd
, tmp
);
6292 gen_set_condexec(s
);
6293 gen_set_pc_im(s
->pc
- 4);
6294 gen_exception(EXCP_BKPT
);
6295 s
->is_jmp
= DISAS_JUMP
;
6297 case 0x8: /* signed multiply */
6301 rs
= (insn
>> 8) & 0xf;
6302 rn
= (insn
>> 12) & 0xf;
6303 rd
= (insn
>> 16) & 0xf;
6305 /* (32 * 16) >> 16 */
6306 tmp
= load_reg(s
, rm
);
6307 tmp2
= load_reg(s
, rs
);
6309 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6312 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6313 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6315 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6316 tcg_temp_free_i64(tmp64
);
6317 if ((sh
& 2) == 0) {
6318 tmp2
= load_reg(s
, rn
);
6319 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6322 store_reg(s
, rd
, tmp
);
6325 tmp
= load_reg(s
, rm
);
6326 tmp2
= load_reg(s
, rs
);
6327 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6330 tmp64
= tcg_temp_new_i64();
6331 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6333 gen_addq(s
, tmp64
, rn
, rd
);
6334 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6335 tcg_temp_free_i64(tmp64
);
6338 tmp2
= load_reg(s
, rn
);
6339 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6342 store_reg(s
, rd
, tmp
);
6349 } else if (((insn
& 0x0e000000) == 0 &&
6350 (insn
& 0x00000090) != 0x90) ||
6351 ((insn
& 0x0e000000) == (1 << 25))) {
6352 int set_cc
, logic_cc
, shiftop
;
6354 op1
= (insn
>> 21) & 0xf;
6355 set_cc
= (insn
>> 20) & 1;
6356 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6358 /* data processing instruction */
6359 if (insn
& (1 << 25)) {
6360 /* immediate operand */
6362 shift
= ((insn
>> 8) & 0xf) * 2;
6364 val
= (val
>> shift
) | (val
<< (32 - shift
));
6367 tcg_gen_movi_i32(tmp2
, val
);
6368 if (logic_cc
&& shift
) {
6369 gen_set_CF_bit31(tmp2
);
6374 tmp2
= load_reg(s
, rm
);
6375 shiftop
= (insn
>> 5) & 3;
6376 if (!(insn
& (1 << 4))) {
6377 shift
= (insn
>> 7) & 0x1f;
6378 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6380 rs
= (insn
>> 8) & 0xf;
6381 tmp
= load_reg(s
, rs
);
6382 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6385 if (op1
!= 0x0f && op1
!= 0x0d) {
6386 rn
= (insn
>> 16) & 0xf;
6387 tmp
= load_reg(s
, rn
);
6391 rd
= (insn
>> 12) & 0xf;
6394 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6398 store_reg_bx(env
, s
, rd
, tmp
);
6401 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6405 store_reg_bx(env
, s
, rd
, tmp
);
6408 if (set_cc
&& rd
== 15) {
6409 /* SUBS r15, ... is used for exception return. */
6413 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6414 gen_exception_return(s
, tmp
);
6417 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6419 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6421 store_reg_bx(env
, s
, rd
, tmp
);
6426 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6428 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6430 store_reg_bx(env
, s
, rd
, tmp
);
6434 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6436 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6438 store_reg_bx(env
, s
, rd
, tmp
);
6442 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6444 gen_add_carry(tmp
, tmp
, tmp2
);
6446 store_reg_bx(env
, s
, rd
, tmp
);
6450 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6452 gen_sub_carry(tmp
, tmp
, tmp2
);
6454 store_reg_bx(env
, s
, rd
, tmp
);
6458 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6460 gen_sub_carry(tmp
, tmp2
, tmp
);
6462 store_reg_bx(env
, s
, rd
, tmp
);
6466 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6473 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6480 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6486 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6491 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6495 store_reg_bx(env
, s
, rd
, tmp
);
6498 if (logic_cc
&& rd
== 15) {
6499 /* MOVS r15, ... is used for exception return. */
6503 gen_exception_return(s
, tmp2
);
6508 store_reg_bx(env
, s
, rd
, tmp2
);
6512 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6516 store_reg_bx(env
, s
, rd
, tmp
);
6520 tcg_gen_not_i32(tmp2
, tmp2
);
6524 store_reg_bx(env
, s
, rd
, tmp2
);
6527 if (op1
!= 0x0f && op1
!= 0x0d) {
6531 /* other instructions */
6532 op1
= (insn
>> 24) & 0xf;
6536 /* multiplies, extra load/stores */
6537 sh
= (insn
>> 5) & 3;
6540 rd
= (insn
>> 16) & 0xf;
6541 rn
= (insn
>> 12) & 0xf;
6542 rs
= (insn
>> 8) & 0xf;
6544 op1
= (insn
>> 20) & 0xf;
6546 case 0: case 1: case 2: case 3: case 6:
6548 tmp
= load_reg(s
, rs
);
6549 tmp2
= load_reg(s
, rm
);
6550 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6552 if (insn
& (1 << 22)) {
6553 /* Subtract (mls) */
6555 tmp2
= load_reg(s
, rn
);
6556 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6558 } else if (insn
& (1 << 21)) {
6560 tmp2
= load_reg(s
, rn
);
6561 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6564 if (insn
& (1 << 20))
6566 store_reg(s
, rd
, tmp
);
6570 tmp
= load_reg(s
, rs
);
6571 tmp2
= load_reg(s
, rm
);
6572 if (insn
& (1 << 22))
6573 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6575 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6576 if (insn
& (1 << 21)) /* mult accumulate */
6577 gen_addq(s
, tmp64
, rn
, rd
);
6578 if (!(insn
& (1 << 23))) { /* double accumulate */
6580 gen_addq_lo(s
, tmp64
, rn
);
6581 gen_addq_lo(s
, tmp64
, rd
);
6583 if (insn
& (1 << 20))
6584 gen_logicq_cc(tmp64
);
6585 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6586 tcg_temp_free_i64(tmp64
);
6590 rn
= (insn
>> 16) & 0xf;
6591 rd
= (insn
>> 12) & 0xf;
6592 if (insn
& (1 << 23)) {
6593 /* load/store exclusive */
6594 op1
= (insn
>> 21) & 0x3;
6599 addr
= tcg_temp_local_new_i32();
6600 load_reg_var(s
, addr
, rn
);
6601 if (insn
& (1 << 20)) {
6604 gen_load_exclusive(s
, rd
, 15, addr
, 2);
6606 case 1: /* ldrexd */
6607 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
6609 case 2: /* ldrexb */
6610 gen_load_exclusive(s
, rd
, 15, addr
, 0);
6612 case 3: /* ldrexh */
6613 gen_load_exclusive(s
, rd
, 15, addr
, 1);
6622 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
6624 case 1: /* strexd */
6625 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
6627 case 2: /* strexb */
6628 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
6630 case 3: /* strexh */
6631 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
6637 tcg_temp_free(addr
);
6639 /* SWP instruction */
6642 /* ??? This is not really atomic. However we know
6643 we never have multiple CPUs running in parallel,
6644 so it is good enough. */
6645 addr
= load_reg(s
, rn
);
6646 tmp
= load_reg(s
, rm
);
6647 if (insn
& (1 << 22)) {
6648 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6649 gen_st8(tmp
, addr
, IS_USER(s
));
6651 tmp2
= gen_ld32(addr
, IS_USER(s
));
6652 gen_st32(tmp
, addr
, IS_USER(s
));
6655 store_reg(s
, rd
, tmp2
);
6661 /* Misc load/store */
6662 rn
= (insn
>> 16) & 0xf;
6663 rd
= (insn
>> 12) & 0xf;
6664 addr
= load_reg(s
, rn
);
6665 if (insn
& (1 << 24))
6666 gen_add_datah_offset(s
, insn
, 0, addr
);
6668 if (insn
& (1 << 20)) {
6672 tmp
= gen_ld16u(addr
, IS_USER(s
));
6675 tmp
= gen_ld8s(addr
, IS_USER(s
));
6679 tmp
= gen_ld16s(addr
, IS_USER(s
));
6683 } else if (sh
& 2) {
6687 tmp
= load_reg(s
, rd
);
6688 gen_st32(tmp
, addr
, IS_USER(s
));
6689 tcg_gen_addi_i32(addr
, addr
, 4);
6690 tmp
= load_reg(s
, rd
+ 1);
6691 gen_st32(tmp
, addr
, IS_USER(s
));
6695 tmp
= gen_ld32(addr
, IS_USER(s
));
6696 store_reg(s
, rd
, tmp
);
6697 tcg_gen_addi_i32(addr
, addr
, 4);
6698 tmp
= gen_ld32(addr
, IS_USER(s
));
6702 address_offset
= -4;
6705 tmp
= load_reg(s
, rd
);
6706 gen_st16(tmp
, addr
, IS_USER(s
));
6709 /* Perform base writeback before the loaded value to
6710 ensure correct behavior with overlapping index registers.
6711 ldrd with base writeback is undefined if the
6712 destination and index registers overlap. */
6713 if (!(insn
& (1 << 24))) {
6714 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6715 store_reg(s
, rn
, addr
);
6716 } else if (insn
& (1 << 21)) {
6718 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6719 store_reg(s
, rn
, addr
);
6724 /* Complete the load. */
6725 store_reg(s
, rd
, tmp
);
6734 if (insn
& (1 << 4)) {
6736 /* Armv6 Media instructions. */
6738 rn
= (insn
>> 16) & 0xf;
6739 rd
= (insn
>> 12) & 0xf;
6740 rs
= (insn
>> 8) & 0xf;
6741 switch ((insn
>> 23) & 3) {
6742 case 0: /* Parallel add/subtract. */
6743 op1
= (insn
>> 20) & 7;
6744 tmp
= load_reg(s
, rn
);
6745 tmp2
= load_reg(s
, rm
);
6746 sh
= (insn
>> 5) & 7;
6747 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6749 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6751 store_reg(s
, rd
, tmp
);
6754 if ((insn
& 0x00700020) == 0) {
6755 /* Halfword pack. */
6756 tmp
= load_reg(s
, rn
);
6757 tmp2
= load_reg(s
, rm
);
6758 shift
= (insn
>> 7) & 0x1f;
6759 if (insn
& (1 << 6)) {
6763 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6764 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6765 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6769 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6770 tcg_gen_ext16u_i32(tmp
, tmp
);
6771 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6773 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6775 store_reg(s
, rd
, tmp
);
6776 } else if ((insn
& 0x00200020) == 0x00200000) {
6778 tmp
= load_reg(s
, rm
);
6779 shift
= (insn
>> 7) & 0x1f;
6780 if (insn
& (1 << 6)) {
6783 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6785 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6787 sh
= (insn
>> 16) & 0x1f;
6789 tmp2
= tcg_const_i32(sh
);
6790 if (insn
& (1 << 22))
6791 gen_helper_usat(tmp
, tmp
, tmp2
);
6793 gen_helper_ssat(tmp
, tmp
, tmp2
);
6794 tcg_temp_free_i32(tmp2
);
6796 store_reg(s
, rd
, tmp
);
6797 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6799 tmp
= load_reg(s
, rm
);
6800 sh
= (insn
>> 16) & 0x1f;
6802 tmp2
= tcg_const_i32(sh
);
6803 if (insn
& (1 << 22))
6804 gen_helper_usat16(tmp
, tmp
, tmp2
);
6806 gen_helper_ssat16(tmp
, tmp
, tmp2
);
6807 tcg_temp_free_i32(tmp2
);
6809 store_reg(s
, rd
, tmp
);
6810 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6812 tmp
= load_reg(s
, rn
);
6813 tmp2
= load_reg(s
, rm
);
6815 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6816 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6819 store_reg(s
, rd
, tmp
);
6820 } else if ((insn
& 0x000003e0) == 0x00000060) {
6821 tmp
= load_reg(s
, rm
);
6822 shift
= (insn
>> 10) & 3;
6823 /* ??? In many cases it's not necessary to do a
6824 rotate, a shift is sufficient. */
6826 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
6827 op1
= (insn
>> 20) & 7;
6829 case 0: gen_sxtb16(tmp
); break;
6830 case 2: gen_sxtb(tmp
); break;
6831 case 3: gen_sxth(tmp
); break;
6832 case 4: gen_uxtb16(tmp
); break;
6833 case 6: gen_uxtb(tmp
); break;
6834 case 7: gen_uxth(tmp
); break;
6835 default: goto illegal_op
;
6838 tmp2
= load_reg(s
, rn
);
6839 if ((op1
& 3) == 0) {
6840 gen_add16(tmp
, tmp2
);
6842 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6846 store_reg(s
, rd
, tmp
);
6847 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
6849 tmp
= load_reg(s
, rm
);
6850 if (insn
& (1 << 22)) {
6851 if (insn
& (1 << 7)) {
6855 gen_helper_rbit(tmp
, tmp
);
6858 if (insn
& (1 << 7))
6861 tcg_gen_bswap32_i32(tmp
, tmp
);
6863 store_reg(s
, rd
, tmp
);
6868 case 2: /* Multiplies (Type 3). */
6869 tmp
= load_reg(s
, rm
);
6870 tmp2
= load_reg(s
, rs
);
6871 if (insn
& (1 << 20)) {
6872 /* Signed multiply most significant [accumulate]. */
6873 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6874 if (insn
& (1 << 5))
6875 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
6876 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6878 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6879 tcg_temp_free_i64(tmp64
);
6881 tmp2
= load_reg(s
, rd
);
6882 if (insn
& (1 << 6)) {
6883 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6885 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6889 store_reg(s
, rn
, tmp
);
6891 if (insn
& (1 << 5))
6892 gen_swap_half(tmp2
);
6893 gen_smul_dual(tmp
, tmp2
);
6894 /* This addition cannot overflow. */
6895 if (insn
& (1 << 6)) {
6896 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6898 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6901 if (insn
& (1 << 22)) {
6902 /* smlald, smlsld */
6903 tmp64
= tcg_temp_new_i64();
6904 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6906 gen_addq(s
, tmp64
, rd
, rn
);
6907 gen_storeq_reg(s
, rd
, rn
, tmp64
);
6908 tcg_temp_free_i64(tmp64
);
6910 /* smuad, smusd, smlad, smlsd */
6913 tmp2
= load_reg(s
, rd
);
6914 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6917 store_reg(s
, rn
, tmp
);
6922 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
6924 case 0: /* Unsigned sum of absolute differences. */
6926 tmp
= load_reg(s
, rm
);
6927 tmp2
= load_reg(s
, rs
);
6928 gen_helper_usad8(tmp
, tmp
, tmp2
);
6931 tmp2
= load_reg(s
, rd
);
6932 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6935 store_reg(s
, rn
, tmp
);
6937 case 0x20: case 0x24: case 0x28: case 0x2c:
6938 /* Bitfield insert/clear. */
6940 shift
= (insn
>> 7) & 0x1f;
6941 i
= (insn
>> 16) & 0x1f;
6945 tcg_gen_movi_i32(tmp
, 0);
6947 tmp
= load_reg(s
, rm
);
6950 tmp2
= load_reg(s
, rd
);
6951 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
6954 store_reg(s
, rd
, tmp
);
6956 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6957 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6959 tmp
= load_reg(s
, rm
);
6960 shift
= (insn
>> 7) & 0x1f;
6961 i
= ((insn
>> 16) & 0x1f) + 1;
6966 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
6968 gen_sbfx(tmp
, shift
, i
);
6971 store_reg(s
, rd
, tmp
);
6981 /* Check for undefined extension instructions
6982 * per the ARM Bible IE:
6983 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6985 sh
= (0xf << 20) | (0xf << 4);
6986 if (op1
== 0x7 && ((insn
& sh
) == sh
))
6990 /* load/store byte/word */
6991 rn
= (insn
>> 16) & 0xf;
6992 rd
= (insn
>> 12) & 0xf;
6993 tmp2
= load_reg(s
, rn
);
6994 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
6995 if (insn
& (1 << 24))
6996 gen_add_data_offset(s
, insn
, tmp2
);
6997 if (insn
& (1 << 20)) {
6999 if (insn
& (1 << 22)) {
7000 tmp
= gen_ld8u(tmp2
, i
);
7002 tmp
= gen_ld32(tmp2
, i
);
7006 tmp
= load_reg(s
, rd
);
7007 if (insn
& (1 << 22))
7008 gen_st8(tmp
, tmp2
, i
);
7010 gen_st32(tmp
, tmp2
, i
);
7012 if (!(insn
& (1 << 24))) {
7013 gen_add_data_offset(s
, insn
, tmp2
);
7014 store_reg(s
, rn
, tmp2
);
7015 } else if (insn
& (1 << 21)) {
7016 store_reg(s
, rn
, tmp2
);
7020 if (insn
& (1 << 20)) {
7021 /* Complete the load. */
7025 store_reg(s
, rd
, tmp
);
7031 int j
, n
, user
, loaded_base
;
7033 /* load/store multiple words */
7034 /* XXX: store correct base if write back */
7036 if (insn
& (1 << 22)) {
7038 goto illegal_op
; /* only usable in supervisor mode */
7040 if ((insn
& (1 << 15)) == 0)
7043 rn
= (insn
>> 16) & 0xf;
7044 addr
= load_reg(s
, rn
);
7046 /* compute total size */
7048 TCGV_UNUSED(loaded_var
);
7051 if (insn
& (1 << i
))
7054 /* XXX: test invalid n == 0 case ? */
7055 if (insn
& (1 << 23)) {
7056 if (insn
& (1 << 24)) {
7058 tcg_gen_addi_i32(addr
, addr
, 4);
7060 /* post increment */
7063 if (insn
& (1 << 24)) {
7065 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7067 /* post decrement */
7069 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7074 if (insn
& (1 << i
)) {
7075 if (insn
& (1 << 20)) {
7077 tmp
= gen_ld32(addr
, IS_USER(s
));
7081 tmp2
= tcg_const_i32(i
);
7082 gen_helper_set_user_reg(tmp2
, tmp
);
7083 tcg_temp_free_i32(tmp2
);
7085 } else if (i
== rn
) {
7089 store_reg(s
, i
, tmp
);
7094 /* special case: r15 = PC + 8 */
7095 val
= (long)s
->pc
+ 4;
7097 tcg_gen_movi_i32(tmp
, val
);
7100 tmp2
= tcg_const_i32(i
);
7101 gen_helper_get_user_reg(tmp
, tmp2
);
7102 tcg_temp_free_i32(tmp2
);
7104 tmp
= load_reg(s
, i
);
7106 gen_st32(tmp
, addr
, IS_USER(s
));
7109 /* no need to add after the last transfer */
7111 tcg_gen_addi_i32(addr
, addr
, 4);
7114 if (insn
& (1 << 21)) {
7116 if (insn
& (1 << 23)) {
7117 if (insn
& (1 << 24)) {
7120 /* post increment */
7121 tcg_gen_addi_i32(addr
, addr
, 4);
7124 if (insn
& (1 << 24)) {
7127 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7129 /* post decrement */
7130 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7133 store_reg(s
, rn
, addr
);
7138 store_reg(s
, rn
, loaded_var
);
7140 if ((insn
& (1 << 22)) && !user
) {
7141 /* Restore CPSR from SPSR. */
7142 tmp
= load_cpu_field(spsr
);
7143 gen_set_cpsr(tmp
, 0xffffffff);
7145 s
->is_jmp
= DISAS_UPDATE
;
7154 /* branch (and link) */
7155 val
= (int32_t)s
->pc
;
7156 if (insn
& (1 << 24)) {
7158 tcg_gen_movi_i32(tmp
, val
);
7159 store_reg(s
, 14, tmp
);
7161 offset
= (((int32_t)insn
<< 8) >> 8);
7162 val
+= (offset
<< 2) + 4;
7170 if (disas_coproc_insn(env
, s
, insn
))
7175 gen_set_pc_im(s
->pc
);
7176 s
->is_jmp
= DISAS_SWI
;
7180 gen_set_condexec(s
);
7181 gen_set_pc_im(s
->pc
- 4);
7182 gen_exception(EXCP_UDEF
);
7183 s
->is_jmp
= DISAS_JUMP
;
7189 /* Return true if this is a Thumb-2 logical op. */
7191 thumb2_logic_op(int op)
7196 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7197 then set condition code flags based on the result of the operation.
7198 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7199 to the high bit of T1.
7200 Returns zero if the opcode is valid. */
7203 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7210 tcg_gen_and_i32(t0, t0, t1);
7214 tcg_gen_andc_i32(t0, t0, t1);
7218 tcg_gen_or_i32(t0, t0, t1);
7222 tcg_gen_not_i32(t1, t1);
7223 tcg_gen_or_i32(t0, t0, t1);
7227 tcg_gen_xor_i32(t0, t0, t1);
7232 gen_helper_add_cc(t0, t0, t1);
7234 tcg_gen_add_i32(t0, t0, t1);
7238 gen_helper_adc_cc(t0, t0, t1);
7244 gen_helper_sbc_cc(t0, t0, t1);
7246 gen_sub_carry(t0, t0, t1);
7250 gen_helper_sub_cc(t0, t0, t1);
7252 tcg_gen_sub_i32(t0, t0, t1);
7256 gen_helper_sub_cc(t0, t1, t0);
7258 tcg_gen_sub_i32(t0, t1, t0);
7260 default: /* 5, 6, 7, 9, 12, 15. */
7266 gen_set_CF_bit31(t1);
7271 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction is not legal. */
7273 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7275 uint32_t insn, imm, shift, offset;
7276 uint32_t rd, rn, rm, rs;
7287 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7288 || arm_feature (env
, ARM_FEATURE_M
))) {
7289 /* Thumb-1 cores may need to treat bl and blx as a pair of
7290 16-bit instructions to get correct prefetch abort behavior. */
7292 if ((insn
& (1 << 12)) == 0) {
7293 /* Second half of blx. */
7294 offset
= ((insn
& 0x7ff) << 1);
7295 tmp
= load_reg(s
, 14);
7296 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7297 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7300 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7301 store_reg(s
, 14, tmp2
);
7305 if (insn
& (1 << 11)) {
7306 /* Second half of bl. */
7307 offset
= ((insn
& 0x7ff) << 1) | 1;
7308 tmp
= load_reg(s
, 14);
7309 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7312 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7313 store_reg(s
, 14, tmp2
);
7317 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7318 /* Instruction spans a page boundary. Implement it as two
7319 16-bit instructions in case the second half causes an
7321 offset
= ((int32_t)insn
<< 21) >> 9;
7322 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7325 /* Fall through to 32-bit decode. */
7328 insn
= lduw_code(s
->pc
);
7330 insn
|= (uint32_t)insn_hw1
<< 16;
7332 if ((insn
& 0xf800e800) != 0xf000e800) {
7336 rn
= (insn
>> 16) & 0xf;
7337 rs
= (insn
>> 12) & 0xf;
7338 rd
= (insn
>> 8) & 0xf;
7340 switch ((insn
>> 25) & 0xf) {
7341 case 0: case 1: case 2: case 3:
7342 /* 16-bit instructions. Should never happen. */
7345 if (insn
& (1 << 22)) {
7346 /* Other load/store, table branch. */
7347 if (insn
& 0x01200000) {
7348 /* Load/store doubleword. */
7351 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7353 addr
= load_reg(s
, rn
);
7355 offset
= (insn
& 0xff) * 4;
7356 if ((insn
& (1 << 23)) == 0)
7358 if (insn
& (1 << 24)) {
7359 tcg_gen_addi_i32(addr
, addr
, offset
);
7362 if (insn
& (1 << 20)) {
7364 tmp
= gen_ld32(addr
, IS_USER(s
));
7365 store_reg(s
, rs
, tmp
);
7366 tcg_gen_addi_i32(addr
, addr
, 4);
7367 tmp
= gen_ld32(addr
, IS_USER(s
));
7368 store_reg(s
, rd
, tmp
);
7371 tmp
= load_reg(s
, rs
);
7372 gen_st32(tmp
, addr
, IS_USER(s
));
7373 tcg_gen_addi_i32(addr
, addr
, 4);
7374 tmp
= load_reg(s
, rd
);
7375 gen_st32(tmp
, addr
, IS_USER(s
));
7377 if (insn
& (1 << 21)) {
7378 /* Base writeback. */
7381 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7382 store_reg(s
, rn
, addr
);
7386 } else if ((insn
& (1 << 23)) == 0) {
7387 /* Load/store exclusive word. */
7388 addr
= tcg_temp_local_new();
7389 load_reg_var(s
, addr
, rn
);
7390 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7391 if (insn
& (1 << 20)) {
7392 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7394 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7396 tcg_temp_free(addr
);
7397 } else if ((insn
& (1 << 6)) == 0) {
7401 tcg_gen_movi_i32(addr
, s
->pc
);
7403 addr
= load_reg(s
, rn
);
7405 tmp
= load_reg(s
, rm
);
7406 tcg_gen_add_i32(addr
, addr
, tmp
);
7407 if (insn
& (1 << 4)) {
7409 tcg_gen_add_i32(addr
, addr
, tmp
);
7411 tmp
= gen_ld16u(addr
, IS_USER(s
));
7414 tmp
= gen_ld8u(addr
, IS_USER(s
));
7417 tcg_gen_shli_i32(tmp
, tmp
, 1);
7418 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7419 store_reg(s
, 15, tmp
);
7421 /* Load/store exclusive byte/halfword/doubleword. */
7423 op
= (insn
>> 4) & 0x3;
7427 addr
= tcg_temp_local_new();
7428 load_reg_var(s
, addr
, rn
);
7429 if (insn
& (1 << 20)) {
7430 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7432 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7434 tcg_temp_free(addr
);
7437 /* Load/store multiple, RFE, SRS. */
7438 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7439 /* Not available in user mode. */
7442 if (insn
& (1 << 20)) {
7444 addr
= load_reg(s
, rn
);
7445 if ((insn
& (1 << 24)) == 0)
7446 tcg_gen_addi_i32(addr
, addr
, -8);
7447 /* Load PC into tmp and CPSR into tmp2. */
7448 tmp
= gen_ld32(addr
, 0);
7449 tcg_gen_addi_i32(addr
, addr
, 4);
7450 tmp2
= gen_ld32(addr
, 0);
7451 if (insn
& (1 << 21)) {
7452 /* Base writeback. */
7453 if (insn
& (1 << 24)) {
7454 tcg_gen_addi_i32(addr
, addr
, 4);
7456 tcg_gen_addi_i32(addr
, addr
, -4);
7458 store_reg(s
, rn
, addr
);
7462 gen_rfe(s
, tmp
, tmp2
);
7466 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7467 addr
= load_reg(s
, 13);
7470 tmp
= tcg_const_i32(op
);
7471 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7472 tcg_temp_free_i32(tmp
);
7474 if ((insn
& (1 << 24)) == 0) {
7475 tcg_gen_addi_i32(addr
, addr
, -8);
7477 tmp
= load_reg(s
, 14);
7478 gen_st32(tmp
, addr
, 0);
7479 tcg_gen_addi_i32(addr
, addr
, 4);
7481 gen_helper_cpsr_read(tmp
);
7482 gen_st32(tmp
, addr
, 0);
7483 if (insn
& (1 << 21)) {
7484 if ((insn
& (1 << 24)) == 0) {
7485 tcg_gen_addi_i32(addr
, addr
, -4);
7487 tcg_gen_addi_i32(addr
, addr
, 4);
7489 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7490 store_reg(s
, 13, addr
);
7492 tmp
= tcg_const_i32(op
);
7493 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7494 tcg_temp_free_i32(tmp
);
7502 /* Load/store multiple. */
7503 addr
= load_reg(s
, rn
);
7505 for (i
= 0; i
< 16; i
++) {
7506 if (insn
& (1 << i
))
7509 if (insn
& (1 << 24)) {
7510 tcg_gen_addi_i32(addr
, addr
, -offset
);
7513 for (i
= 0; i
< 16; i
++) {
7514 if ((insn
& (1 << i
)) == 0)
7516 if (insn
& (1 << 20)) {
7518 tmp
= gen_ld32(addr
, IS_USER(s
));
7522 store_reg(s
, i
, tmp
);
7526 tmp
= load_reg(s
, i
);
7527 gen_st32(tmp
, addr
, IS_USER(s
));
7529 tcg_gen_addi_i32(addr
, addr
, 4);
7531 if (insn
& (1 << 21)) {
7532 /* Base register writeback. */
7533 if (insn
& (1 << 24)) {
7534 tcg_gen_addi_i32(addr
, addr
, -offset
);
7536 /* Fault if writeback register is in register list. */
7537 if (insn
& (1 << rn
))
7539 store_reg(s
, rn
, addr
);
7546 case 5: /* Data processing register constant shift. */
7549 tcg_gen_movi_i32(tmp
, 0);
7551 tmp
= load_reg(s
, rn
);
7553 tmp2
= load_reg(s
, rm
);
7554 op
= (insn
>> 21) & 0xf;
7555 shiftop
= (insn
>> 4) & 3;
7556 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7557 conds
= (insn
& (1 << 20)) != 0;
7558 logic_cc
= (conds
&& thumb2_logic_op(op
));
7559 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7560 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
7564 store_reg(s
, rd
, tmp
);
7569 case 13: /* Misc data processing. */
7570 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7571 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7574 case 0: /* Register controlled shift. */
7575 tmp
= load_reg(s
, rn
);
7576 tmp2
= load_reg(s
, rm
);
7577 if ((insn
& 0x70) != 0)
7579 op
= (insn
>> 21) & 3;
7580 logic_cc
= (insn
& (1 << 20)) != 0;
7581 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7584 store_reg_bx(env
, s
, rd
, tmp
);
7586 case 1: /* Sign/zero extend. */
7587 tmp
= load_reg(s
, rm
);
7588 shift
= (insn
>> 4) & 3;
7589 /* ??? In many cases it's not necessary to do a
7590 rotate, a shift is sufficient. */
7592 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7593 op
= (insn
>> 20) & 7;
7595 case 0: gen_sxth(tmp
); break;
7596 case 1: gen_uxth(tmp
); break;
7597 case 2: gen_sxtb16(tmp
); break;
7598 case 3: gen_uxtb16(tmp
); break;
7599 case 4: gen_sxtb(tmp
); break;
7600 case 5: gen_uxtb(tmp
); break;
7601 default: goto illegal_op
;
7604 tmp2
= load_reg(s
, rn
);
7605 if ((op
>> 1) == 1) {
7606 gen_add16(tmp
, tmp2
);
7608 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7612 store_reg(s
, rd
, tmp
);
7614 case 2: /* SIMD add/subtract. */
7615 op
= (insn
>> 20) & 7;
7616 shift
= (insn
>> 4) & 7;
7617 if ((op
& 3) == 3 || (shift
& 3) == 3)
7619 tmp
= load_reg(s
, rn
);
7620 tmp2
= load_reg(s
, rm
);
7621 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7623 store_reg(s
, rd
, tmp
);
7625 case 3: /* Other data processing. */
7626 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7628 /* Saturating add/subtract. */
7629 tmp
= load_reg(s
, rn
);
7630 tmp2
= load_reg(s
, rm
);
7632 gen_helper_double_saturate(tmp
, tmp
);
7634 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7636 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7639 tmp
= load_reg(s
, rn
);
7641 case 0x0a: /* rbit */
7642 gen_helper_rbit(tmp
, tmp
);
7644 case 0x08: /* rev */
7645 tcg_gen_bswap32_i32(tmp
, tmp
);
7647 case 0x09: /* rev16 */
7650 case 0x0b: /* revsh */
7653 case 0x10: /* sel */
7654 tmp2
= load_reg(s
, rm
);
7656 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7657 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7661 case 0x18: /* clz */
7662 gen_helper_clz(tmp
, tmp
);
7668 store_reg(s
, rd
, tmp
);
7670 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7671 op
= (insn
>> 4) & 0xf;
7672 tmp
= load_reg(s
, rn
);
7673 tmp2
= load_reg(s
, rm
);
7674 switch ((insn
>> 20) & 7) {
7675 case 0: /* 32 x 32 -> 32 */
7676 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7679 tmp2
= load_reg(s
, rs
);
7681 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7683 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7687 case 1: /* 16 x 16 -> 32 */
7688 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7691 tmp2
= load_reg(s
, rs
);
7692 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7696 case 2: /* Dual multiply add. */
7697 case 4: /* Dual multiply subtract. */
7699 gen_swap_half(tmp2
);
7700 gen_smul_dual(tmp
, tmp2
);
7701 /* This addition cannot overflow. */
7702 if (insn
& (1 << 22)) {
7703 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7705 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7710 tmp2
= load_reg(s
, rs
);
7711 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7715 case 3: /* 32 * 16 -> 32msb */
7717 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7720 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7721 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7723 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7724 tcg_temp_free_i64(tmp64
);
7727 tmp2
= load_reg(s
, rs
);
7728 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7732 case 5: case 6: /* 32 * 32 -> 32msb */
7733 gen_imull(tmp
, tmp2
);
7734 if (insn
& (1 << 5)) {
7735 gen_roundqd(tmp
, tmp2
);
7742 tmp2
= load_reg(s
, rs
);
7743 if (insn
& (1 << 21)) {
7744 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7746 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7751 case 7: /* Unsigned sum of absolute differences. */
7752 gen_helper_usad8(tmp
, tmp
, tmp2
);
7755 tmp2
= load_reg(s
, rs
);
7756 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7761 store_reg(s
, rd
, tmp
);
7763 case 6: case 7: /* 64-bit multiply, Divide. */
7764 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7765 tmp
= load_reg(s
, rn
);
7766 tmp2
= load_reg(s
, rm
);
7767 if ((op
& 0x50) == 0x10) {
7769 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7772 gen_helper_udiv(tmp
, tmp
, tmp2
);
7774 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7776 store_reg(s
, rd
, tmp
);
7777 } else if ((op
& 0xe) == 0xc) {
7778 /* Dual multiply accumulate long. */
7780 gen_swap_half(tmp2
);
7781 gen_smul_dual(tmp
, tmp2
);
7783 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7785 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7789 tmp64
= tcg_temp_new_i64();
7790 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7792 gen_addq(s
, tmp64
, rs
, rd
);
7793 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7794 tcg_temp_free_i64(tmp64
);
7797 /* Unsigned 64-bit multiply */
7798 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7802 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7804 tmp64
= tcg_temp_new_i64();
7805 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7808 /* Signed 64-bit multiply */
7809 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7814 gen_addq_lo(s
, tmp64
, rs
);
7815 gen_addq_lo(s
, tmp64
, rd
);
7816 } else if (op
& 0x40) {
7817 /* 64-bit accumulate. */
7818 gen_addq(s
, tmp64
, rs
, rd
);
7820 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7821 tcg_temp_free_i64(tmp64
);
7826 case 6: case 7: case 14: case 15:
7828 if (((insn
>> 24) & 3) == 3) {
7829 /* Translate into the equivalent ARM encoding. */
7830 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
7831 if (disas_neon_data_insn(env
, s
, insn
))
7834 if (insn
& (1 << 28))
7836 if (disas_coproc_insn (env
, s
, insn
))
7840 case 8: case 9: case 10: case 11:
7841 if (insn
& (1 << 15)) {
7842 /* Branches, misc control. */
7843 if (insn
& 0x5000) {
7844 /* Unconditional branch. */
7845 /* signextend(hw1[10:0]) -> offset[:12]. */
7846 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
7847 /* hw1[10:0] -> offset[11:1]. */
7848 offset
|= (insn
& 0x7ff) << 1;
7849 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7850 offset[24:22] already have the same value because of the
7851 sign extension above. */
7852 offset
^= ((~insn
) & (1 << 13)) << 10;
7853 offset
^= ((~insn
) & (1 << 11)) << 11;
7855 if (insn
& (1 << 14)) {
7856 /* Branch and link. */
7857 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
7861 if (insn
& (1 << 12)) {
7866 offset
&= ~(uint32_t)2;
7867 gen_bx_im(s
, offset
);
7869 } else if (((insn
>> 23) & 7) == 7) {
7871 if (insn
& (1 << 13))
7874 if (insn
& (1 << 26)) {
7875 /* Secure monitor call (v6Z) */
7876 goto illegal_op
; /* not implemented. */
7878 op
= (insn
>> 20) & 7;
7880 case 0: /* msr cpsr. */
7882 tmp
= load_reg(s
, rn
);
7883 addr
= tcg_const_i32(insn
& 0xff);
7884 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
7885 tcg_temp_free_i32(addr
);
7891 case 1: /* msr spsr. */
7894 tmp
= load_reg(s
, rn
);
7896 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
7900 case 2: /* cps, nop-hint. */
7901 if (((insn
>> 8) & 7) == 0) {
7902 gen_nop_hint(s
, insn
& 0xff);
7904 /* Implemented as NOP in user mode. */
7909 if (insn
& (1 << 10)) {
7910 if (insn
& (1 << 7))
7912 if (insn
& (1 << 6))
7914 if (insn
& (1 << 5))
7916 if (insn
& (1 << 9))
7917 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
7919 if (insn
& (1 << 8)) {
7921 imm
|= (insn
& 0x1f);
7924 gen_set_psr_im(s
, offset
, 0, imm
);
7927 case 3: /* Special control operations. */
7929 op
= (insn
>> 4) & 0xf;
7937 /* These execute as NOPs. */
7944 /* Trivial implementation equivalent to bx. */
7945 tmp
= load_reg(s
, rn
);
7948 case 5: /* Exception return. */
7949 /* Unpredictable in user mode. */
7951 case 6: /* mrs cpsr. */
7954 addr
= tcg_const_i32(insn
& 0xff);
7955 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
7956 tcg_temp_free_i32(addr
);
7958 gen_helper_cpsr_read(tmp
);
7960 store_reg(s
, rd
, tmp
);
7962 case 7: /* mrs spsr. */
7963 /* Not accessible in user mode. */
7964 if (IS_USER(s
) || IS_M(env
))
7966 tmp
= load_cpu_field(spsr
);
7967 store_reg(s
, rd
, tmp
);
7972 /* Conditional branch. */
7973 op
= (insn
>> 22) & 0xf;
7974 /* Generate a conditional jump to next instruction. */
7975 s
->condlabel
= gen_new_label();
7976 gen_test_cc(op
^ 1, s
->condlabel
);
7979 /* offset[11:1] = insn[10:0] */
7980 offset
= (insn
& 0x7ff) << 1;
7981 /* offset[17:12] = insn[21:16]. */
7982 offset
|= (insn
& 0x003f0000) >> 4;
7983 /* offset[31:20] = insn[26]. */
7984 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
7985 /* offset[18] = insn[13]. */
7986 offset
|= (insn
& (1 << 13)) << 5;
7987 /* offset[19] = insn[11]. */
7988 offset
|= (insn
& (1 << 11)) << 8;
7990 /* jump to the offset */
7991 gen_jmp(s
, s
->pc
+ offset
);
7994 /* Data processing immediate. */
7995 if (insn
& (1 << 25)) {
7996 if (insn
& (1 << 24)) {
7997 if (insn
& (1 << 20))
7999 /* Bitfield/Saturate. */
8000 op
= (insn
>> 21) & 7;
8002 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8005 tcg_gen_movi_i32(tmp
, 0);
8007 tmp
= load_reg(s
, rn
);
8010 case 2: /* Signed bitfield extract. */
8012 if (shift
+ imm
> 32)
8015 gen_sbfx(tmp
, shift
, imm
);
8017 case 6: /* Unsigned bitfield extract. */
8019 if (shift
+ imm
> 32)
8022 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8024 case 3: /* Bitfield insert/clear. */
8027 imm
= imm
+ 1 - shift
;
8029 tmp2
= load_reg(s
, rd
);
8030 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
8036 default: /* Saturate. */
8039 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8041 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8043 tmp2
= tcg_const_i32(imm
);
8046 if ((op
& 1) && shift
== 0)
8047 gen_helper_usat16(tmp
, tmp
, tmp2
);
8049 gen_helper_usat(tmp
, tmp
, tmp2
);
8052 if ((op
& 1) && shift
== 0)
8053 gen_helper_ssat16(tmp
, tmp
, tmp2
);
8055 gen_helper_ssat(tmp
, tmp
, tmp2
);
8057 tcg_temp_free_i32(tmp2
);
8060 store_reg(s
, rd
, tmp
);
8062 imm
= ((insn
& 0x04000000) >> 15)
8063 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8064 if (insn
& (1 << 22)) {
8065 /* 16-bit immediate. */
8066 imm
|= (insn
>> 4) & 0xf000;
8067 if (insn
& (1 << 23)) {
8069 tmp
= load_reg(s
, rd
);
8070 tcg_gen_ext16u_i32(tmp
, tmp
);
8071 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8075 tcg_gen_movi_i32(tmp
, imm
);
8078 /* Add/sub 12-bit immediate. */
8080 offset
= s
->pc
& ~(uint32_t)3;
8081 if (insn
& (1 << 23))
8086 tcg_gen_movi_i32(tmp
, offset
);
8088 tmp
= load_reg(s
, rn
);
8089 if (insn
& (1 << 23))
8090 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8092 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8095 store_reg(s
, rd
, tmp
);
8098 int shifter_out
= 0;
8099 /* modified 12-bit immediate. */
8100 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
8101 imm
= (insn
& 0xff);
8104 /* Nothing to do. */
8106 case 1: /* 00XY00XY */
8109 case 2: /* XY00XY00 */
8113 case 3: /* XYXYXYXY */
8117 default: /* Rotated constant. */
8118 shift
= (shift
<< 1) | (imm
>> 7);
8120 imm
= imm
<< (32 - shift
);
8125 tcg_gen_movi_i32(tmp2
, imm
);
8126 rn
= (insn
>> 16) & 0xf;
8129 tcg_gen_movi_i32(tmp
, 0);
8131 tmp
= load_reg(s
, rn
);
8133 op
= (insn
>> 21) & 0xf;
8134 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
8135 shifter_out
, tmp
, tmp2
))
8138 rd
= (insn
>> 8) & 0xf;
8140 store_reg(s
, rd
, tmp
);
8147 case 12: /* Load/store single data item. */
8152 if ((insn
& 0x01100000) == 0x01000000) {
8153 if (disas_neon_ls_insn(env
, s
, insn
))
8161 /* s->pc has already been incremented by 4. */
8162 imm
= s
->pc
& 0xfffffffc;
8163 if (insn
& (1 << 23))
8164 imm
+= insn
& 0xfff;
8166 imm
-= insn
& 0xfff;
8167 tcg_gen_movi_i32(addr
, imm
);
8169 addr
= load_reg(s
, rn
);
8170 if (insn
& (1 << 23)) {
8171 /* Positive offset. */
8173 tcg_gen_addi_i32(addr
, addr
, imm
);
8175 op
= (insn
>> 8) & 7;
8178 case 0: case 8: /* Shifted Register. */
8179 shift
= (insn
>> 4) & 0xf;
8182 tmp
= load_reg(s
, rm
);
8184 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8185 tcg_gen_add_i32(addr
, addr
, tmp
);
8188 case 4: /* Negative offset. */
8189 tcg_gen_addi_i32(addr
, addr
, -imm
);
8191 case 6: /* User privilege. */
8192 tcg_gen_addi_i32(addr
, addr
, imm
);
8195 case 1: /* Post-decrement. */
8198 case 3: /* Post-increment. */
8202 case 5: /* Pre-decrement. */
8205 case 7: /* Pre-increment. */
8206 tcg_gen_addi_i32(addr
, addr
, imm
);
8214 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
8215 if (insn
& (1 << 20)) {
8217 if (rs
== 15 && op
!= 2) {
8220 /* Memory hint. Implemented as NOP. */
8223 case 0: tmp
= gen_ld8u(addr
, user
); break;
8224 case 4: tmp
= gen_ld8s(addr
, user
); break;
8225 case 1: tmp
= gen_ld16u(addr
, user
); break;
8226 case 5: tmp
= gen_ld16s(addr
, user
); break;
8227 case 2: tmp
= gen_ld32(addr
, user
); break;
8228 default: goto illegal_op
;
8233 store_reg(s
, rs
, tmp
);
8240 tmp
= load_reg(s
, rs
);
8242 case 0: gen_st8(tmp
, addr
, user
); break;
8243 case 1: gen_st16(tmp
, addr
, user
); break;
8244 case 2: gen_st32(tmp
, addr
, user
); break;
8245 default: goto illegal_op
;
8249 tcg_gen_addi_i32(addr
, addr
, imm
);
8251 store_reg(s
, rn
, addr
);
8265 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8267 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8274 if (s
->condexec_mask
) {
8275 cond
= s
->condexec_cond
;
8276 s
->condlabel
= gen_new_label();
8277 gen_test_cc(cond
^ 1, s
->condlabel
);
8281 insn
= lduw_code(s
->pc
);
8284 switch (insn
>> 12) {
8288 op
= (insn
>> 11) & 3;
8291 rn
= (insn
>> 3) & 7;
8292 tmp
= load_reg(s
, rn
);
8293 if (insn
& (1 << 10)) {
8296 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
8299 rm
= (insn
>> 6) & 7;
8300 tmp2
= load_reg(s
, rm
);
8302 if (insn
& (1 << 9)) {
8303 if (s
->condexec_mask
)
8304 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8306 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8308 if (s
->condexec_mask
)
8309 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8311 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8314 store_reg(s
, rd
, tmp
);
8316 /* shift immediate */
8317 rm
= (insn
>> 3) & 7;
8318 shift
= (insn
>> 6) & 0x1f;
8319 tmp
= load_reg(s
, rm
);
8320 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
8321 if (!s
->condexec_mask
)
8323 store_reg(s
, rd
, tmp
);
8327 /* arithmetic large immediate */
8328 op
= (insn
>> 11) & 3;
8329 rd
= (insn
>> 8) & 0x7;
8330 if (op
== 0) { /* mov */
8332 tcg_gen_movi_i32(tmp
, insn
& 0xff);
8333 if (!s
->condexec_mask
)
8335 store_reg(s
, rd
, tmp
);
8337 tmp
= load_reg(s
, rd
);
8339 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
8342 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8347 if (s
->condexec_mask
)
8348 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8350 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8352 store_reg(s
, rd
, tmp
);
8355 if (s
->condexec_mask
)
8356 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8358 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8360 store_reg(s
, rd
, tmp
);
8366 if (insn
& (1 << 11)) {
8367 rd
= (insn
>> 8) & 7;
8368 /* load pc-relative. Bit 1 of PC is ignored. */
8369 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
8370 val
&= ~(uint32_t)2;
8372 tcg_gen_movi_i32(addr
, val
);
8373 tmp
= gen_ld32(addr
, IS_USER(s
));
8375 store_reg(s
, rd
, tmp
);
8378 if (insn
& (1 << 10)) {
8379 /* data processing extended or blx */
8380 rd
= (insn
& 7) | ((insn
>> 4) & 8);
8381 rm
= (insn
>> 3) & 0xf;
8382 op
= (insn
>> 8) & 3;
8385 tmp
= load_reg(s
, rd
);
8386 tmp2
= load_reg(s
, rm
);
8387 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8389 store_reg(s
, rd
, tmp
);
8392 tmp
= load_reg(s
, rd
);
8393 tmp2
= load_reg(s
, rm
);
8394 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8398 case 2: /* mov/cpy */
8399 tmp
= load_reg(s
, rm
);
8400 store_reg(s
, rd
, tmp
);
8402 case 3:/* branch [and link] exchange thumb register */
8403 tmp
= load_reg(s
, rm
);
8404 if (insn
& (1 << 7)) {
8405 val
= (uint32_t)s
->pc
| 1;
8407 tcg_gen_movi_i32(tmp2
, val
);
8408 store_reg(s
, 14, tmp2
);
8416 /* data processing register */
8418 rm
= (insn
>> 3) & 7;
8419 op
= (insn
>> 6) & 0xf;
8420 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
8421 /* the shift/rotate ops want the operands backwards */
8430 if (op
== 9) { /* neg */
8432 tcg_gen_movi_i32(tmp
, 0);
8433 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
8434 tmp
= load_reg(s
, rd
);
8439 tmp2
= load_reg(s
, rm
);
8442 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8443 if (!s
->condexec_mask
)
8447 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8448 if (!s
->condexec_mask
)
8452 if (s
->condexec_mask
) {
8453 gen_helper_shl(tmp2
, tmp2
, tmp
);
8455 gen_helper_shl_cc(tmp2
, tmp2
, tmp
);
8460 if (s
->condexec_mask
) {
8461 gen_helper_shr(tmp2
, tmp2
, tmp
);
8463 gen_helper_shr_cc(tmp2
, tmp2
, tmp
);
8468 if (s
->condexec_mask
) {
8469 gen_helper_sar(tmp2
, tmp2
, tmp
);
8471 gen_helper_sar_cc(tmp2
, tmp2
, tmp
);
8476 if (s
->condexec_mask
)
8479 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
8482 if (s
->condexec_mask
)
8483 gen_sub_carry(tmp
, tmp
, tmp2
);
8485 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
8488 if (s
->condexec_mask
) {
8489 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
8490 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
8492 gen_helper_ror_cc(tmp2
, tmp2
, tmp
);
8497 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8502 if (s
->condexec_mask
)
8503 tcg_gen_neg_i32(tmp
, tmp2
);
8505 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8508 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8512 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8516 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8517 if (!s
->condexec_mask
)
8521 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8522 if (!s
->condexec_mask
)
8526 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8527 if (!s
->condexec_mask
)
8531 tcg_gen_not_i32(tmp2
, tmp2
);
8532 if (!s
->condexec_mask
)
8540 store_reg(s
, rm
, tmp2
);
8544 store_reg(s
, rd
, tmp
);
8554 /* load/store register offset. */
8556 rn
= (insn
>> 3) & 7;
8557 rm
= (insn
>> 6) & 7;
8558 op
= (insn
>> 9) & 7;
8559 addr
= load_reg(s
, rn
);
8560 tmp
= load_reg(s
, rm
);
8561 tcg_gen_add_i32(addr
, addr
, tmp
);
8564 if (op
< 3) /* store */
8565 tmp
= load_reg(s
, rd
);
8569 gen_st32(tmp
, addr
, IS_USER(s
));
8572 gen_st16(tmp
, addr
, IS_USER(s
));
8575 gen_st8(tmp
, addr
, IS_USER(s
));
8578 tmp
= gen_ld8s(addr
, IS_USER(s
));
8581 tmp
= gen_ld32(addr
, IS_USER(s
));
8584 tmp
= gen_ld16u(addr
, IS_USER(s
));
8587 tmp
= gen_ld8u(addr
, IS_USER(s
));
8590 tmp
= gen_ld16s(addr
, IS_USER(s
));
8593 if (op
>= 3) /* load */
8594 store_reg(s
, rd
, tmp
);
8599 /* load/store word immediate offset */
8601 rn
= (insn
>> 3) & 7;
8602 addr
= load_reg(s
, rn
);
8603 val
= (insn
>> 4) & 0x7c;
8604 tcg_gen_addi_i32(addr
, addr
, val
);
8606 if (insn
& (1 << 11)) {
8608 tmp
= gen_ld32(addr
, IS_USER(s
));
8609 store_reg(s
, rd
, tmp
);
8612 tmp
= load_reg(s
, rd
);
8613 gen_st32(tmp
, addr
, IS_USER(s
));
8619 /* load/store byte immediate offset */
8621 rn
= (insn
>> 3) & 7;
8622 addr
= load_reg(s
, rn
);
8623 val
= (insn
>> 6) & 0x1f;
8624 tcg_gen_addi_i32(addr
, addr
, val
);
8626 if (insn
& (1 << 11)) {
8628 tmp
= gen_ld8u(addr
, IS_USER(s
));
8629 store_reg(s
, rd
, tmp
);
8632 tmp
= load_reg(s
, rd
);
8633 gen_st8(tmp
, addr
, IS_USER(s
));
        /* load/store halfword immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
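        /* Below: rd = SP/PC + imm8 * 4.  Bit 11 selects SP (r13) as the base;
         * otherwise the base is the PC read as this instruction's address
         * plus 4 with bit 1 forced to zero.  s->pc has already been advanced
         * past the 16-bit opcode, which is why the code uses (s->pc + 2). */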
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
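        /* Below: ADD/SUB SP, #imm.  The 7-bit immediate counts words
         * (imm7 * 4 bytes) and bit 7 selects subtraction. */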
        op = (insn >> 8) & 0xf;

        /* adjust stack pointer */
        tmp = load_reg(s, 13);
        val = (insn & 0x7f) * 4;
        if (insn & (1 << 7))
            val = -(int32_t)val;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, 13, tmp);
        case 2: /* sign/zero extend. */
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
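        /* Below: push/pop.  The transfer size is first computed from the
         * 8-bit register list (plus lr or pc when bit 8 is set); push
         * (bit 11 clear) pre-decrements SP by that amount before storing,
         * pop loads upwards, and the final address is written back to SP. */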
        case 4: case 5: case 0xc: case 0xd:
            addr = load_reg(s, 13);
            if (insn & (1 << 8))

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))

            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has deallocated the stack */
                } else {
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
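        /* Below: cbz/cbnz (bit 11 set means cbnz).  The branch offset is
         * rebuilt from the split i:imm5 field scaled by 2:
         * ((insn & 0xf8) >> 2) is imm5 << 1 and ((insn & 0x200) >> 3) places
         * the i bit at bit 6; the target is this instruction's address
         * plus 4 plus that offset. */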
        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
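        /* Below: IT only records state in the DisasContext, no code is
         * generated.  condexec_cond holds the top three bits of the base
         * condition; condexec_mask is a 5-bit field whose most significant
         * bit supplies the low condition bit for the next instruction and
         * which is shifted left once per translated instruction (see the
         * main loop) until it reaches zero. */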
        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state. */
            break;
        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            tmp = tcg_const_i32((insn & (1 << 4)) != 0);

            addr = tcg_const_i32(16);
            gen_helper_v7m_msr(cpu_env, addr, tmp);
            tcg_temp_free_i32(addr);

            addr = tcg_const_i32(17);
            gen_helper_v7m_msr(cpu_env, addr, tmp);
            tcg_temp_free_i32(addr);

            tcg_temp_free_i32(tmp);

            if (insn & (1 << 4))
                shift = CPSR_A | CPSR_I | CPSR_F;

            gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
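        /* Below: ldmia/stmia with an 8-bit register list.  After the
         * transfer the incremented address is written back to the base
         * register, but only when the base register is not itself in the
         * list. */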
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback. */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        }
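        /* Below: conditional branch, or swi when cond == 0xf.  The 8-bit
         * immediate is sign-extended by ((int32_t)insn << 24) >> 24 and
         * counts halfwords; the target is this instruction's address plus 4
         * plus offset * 2 (val = s->pc + 2 because s->pc already points past
         * the opcode). */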
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;

        gen_set_condexec(s);
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_SWI;

        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
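        /* Below: unconditional branches.  Forms with bit 11 set are 32-bit
         * Thumb-2 encodings and are handed to disas_thumb2_insn; the 16-bit
         * B encoding carries an 11-bit halfword offset that is sign-extended
         * by ((int32_t)insn << 21) >> 21. */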
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))

        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;

        if (disas_thumb2_insn(env, s, insn))
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;

    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));

    dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    /* Reset the conditional execution bits immediately.  This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    goto done_generating;
                }
            }
        }
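        /* The gen_opc_* arrays below are only filled in when search_pc is
         * set: they record, per generated op, the guest PC and instruction
         * count so gen_pc_load() can recover the PC of a faulting
         * instruction. */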
            j = gen_opc_ptr - gen_opc_buf;

            gen_opc_instr_start[lj++] = 0;

            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))

        disas_thumb_insn(env, dc);
        if (dc->condexec_mask) {
            dc->condexec_cond = (dc->condexec_cond & 0xe)
                                | ((dc->condexec_mask >> 4) & 1);
            dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
            if (dc->condexec_mask == 0) {
                dc->condexec_cond = 0;
            }
        }

        disas_arm_insn(env, dc);

        fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place. */
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {

        /* FIXME: This can theoretically happen with self-modifying
           code. */
        cpu_abort(env, "IO on conditional branch instruction");
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI) {
            gen_exception(EXCP_SWI);
        } else {
            gen_exception(EXCP_DEBUG);
        }
        gen_set_label(dc->condlabel);

        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    /* While branches must always occur at the end of an IT block,
       there are a few other things that can cause us to terminate
       the TB in the middle of an IT block:
        - Exception generating instructions (bkpt, swi, undefined).
        - Hardware watchpoints.
       Hardware breakpoints have already been handled and skip this code. */
    gen_set_condexec(dc);
    switch (dc->is_jmp) {

        gen_goto_tb(dc, 1, dc->pc);

        /* indicate that the hash table must be used to find the next TB */

        /* nothing more to generate */

        gen_exception(EXCP_SWI);
    }

    gen_set_label(dc->condlabel);
    gen_set_condexec(dc);
    gen_goto_tb(dc, 1, dc->pc);
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
    }

    j = gen_opc_ptr - gen_opc_buf;

    gen_opc_instr_start[lj++] = 0;

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */

    cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);

    cpu_fprintf(f, "\n");

    cpu_fprintf(f, " ");

    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];

        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
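/* Restore the guest PC after a fault: pc_pos indexes the gen_opc_pc[]
   entry recorded for the faulting instruction while translating with
   search_pc set. */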
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}