 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    uint32_t condexec_mask;
    uint32_t condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    /* Sign-extend the low byte before it becomes the high byte of the
       result, then shift it into place and merge in the old high byte.  */
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
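/* Worked example (illustrative note, not from the original source): for a
   4-bit field, signbit == 0x8, so a masked field value of 0xe becomes
   (0xe ^ 0x8) - 0x8 = -2; the xori/subi pair above is the usual branch-free
   sign extension of a "width"-bit quantity.  */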
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
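/* Explanatory sketch (not part of the original source): clearing bit 15 of
   both operands before the single 32-bit add keeps a carry out of the low
   halfword from spilling into the high halfword; xoring the saved
   (t0 ^ t1) & 0x8000 term back in restores the true bit 15 of the low
   halfword sum, so one add behaves as two independent 16-bit adds.  */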
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}
/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
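/* Encoding reminder (added note, assumption about the shift encoding handled
   above): in the immediate form a rotate amount of zero does not mean
   "rotate by 0"; it selects RRX, a 1-bit rotate through the carry flag,
   which is why the shift == 0 path shifts var right by one and inserts the
   old CF value at bit 31.  */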
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }

static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }

static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
832 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
835 int val
, rm
, shift
, shiftop
;
838 if (!(insn
& (1 << 25))) {
841 if (!(insn
& (1 << 23)))
844 tcg_gen_addi_i32(var
, var
, val
);
848 shift
= (insn
>> 7) & 0x1f;
849 shiftop
= (insn
>> 5) & 3;
850 offset
= load_reg(s
, rm
);
851 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
852 if (!(insn
& (1 << 23)))
853 tcg_gen_sub_i32(var
, var
, offset
);
855 tcg_gen_add_i32(var
, var
, offset
);
860 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
866 if (insn
& (1 << 22)) {
868 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
869 if (!(insn
& (1 << 23)))
873 tcg_gen_addi_i32(var
, var
, val
);
877 tcg_gen_addi_i32(var
, var
, extra
);
879 offset
= load_reg(s
, rm
);
880 if (!(insn
& (1 << 23)))
881 tcg_gen_sub_i32(var
, var
, offset
);
883 tcg_gen_add_i32(var
, var
, offset
);
888 #define VFP_OP2(name) \
889 static inline void gen_vfp_##name(int dp) \
892 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
894 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
904 static inline void gen_vfp_abs(int dp
)
907 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
909 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
912 static inline void gen_vfp_neg(int dp
)
915 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
917 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
920 static inline void gen_vfp_sqrt(int dp
)
923 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
925 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
928 static inline void gen_vfp_cmp(int dp
)
931 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
933 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
936 static inline void gen_vfp_cmpe(int dp
)
939 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
941 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
944 static inline void gen_vfp_F1_ld0(int dp
)
947 tcg_gen_movi_i64(cpu_F1d
, 0);
949 tcg_gen_movi_i32(cpu_F1s
, 0);
952 static inline void gen_vfp_uito(int dp
)
955 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
957 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
960 static inline void gen_vfp_sito(int dp
)
963 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
965 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
968 static inline void gen_vfp_toui(int dp
)
971 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
973 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
976 static inline void gen_vfp_touiz(int dp
)
979 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
981 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
984 static inline void gen_vfp_tosi(int dp
)
987 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
989 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
992 static inline void gen_vfp_tosiz(int dp
)
995 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
997 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1000 #define VFP_GEN_FIX(name) \
1001 static inline void gen_vfp_##name(int dp, int shift) \
1003 TCGv tmp_shift = tcg_const_i32(shift); \
1005 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1007 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1008 tcg_temp_free_i32(tmp_shift); \
1020 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv addr
)
1023 tcg_gen_qemu_ld64(cpu_F0d
, addr
, IS_USER(s
));
1025 tcg_gen_qemu_ld32u(cpu_F0s
, addr
, IS_USER(s
));
1028 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv addr
)
1031 tcg_gen_qemu_st64(cpu_F0d
, addr
, IS_USER(s
));
1033 tcg_gen_qemu_st32(cpu_F0s
, addr
, IS_USER(s
));
1037 vfp_reg_offset (int dp
, int reg
)
1040 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1042 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1043 + offsetof(CPU_DoubleU
, l
.upper
);
1045 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1046 + offsetof(CPU_DoubleU
, l
.lower
);
1050 /* Return the offset of a 32-bit piece of a NEON register.
1051 zero is the least significant end of the register. */
1053 neon_reg_offset (int reg
, int n
)
1057 return vfp_reg_offset(0, sreg
);
1060 static TCGv
neon_load_reg(int reg
, int pass
)
1062 TCGv tmp
= new_tmp();
1063 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1067 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1069 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1073 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1075 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1078 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1080 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1083 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1084 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1085 #define tcg_gen_st_f32 tcg_gen_st_i32
1086 #define tcg_gen_st_f64 tcg_gen_st_i64
1088 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1091 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1093 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1096 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1099 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1101 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1104 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1107 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1109 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1112 #define ARM_CP_RW_BIT (1 << 20)
1114 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1116 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1119 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1121 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1124 static inline TCGv
iwmmxt_load_creg(int reg
)
1126 TCGv var
= new_tmp();
1127 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1131 static inline void iwmmxt_store_creg(int reg
, TCGv var
)
1133 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1137 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1139 iwmmxt_store_reg(cpu_M0
, rn
);
1142 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1144 iwmmxt_load_reg(cpu_M0
, rn
);
1147 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1149 iwmmxt_load_reg(cpu_V1
, rn
);
1150 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1153 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1155 iwmmxt_load_reg(cpu_V1
, rn
);
1156 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1159 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1161 iwmmxt_load_reg(cpu_V1
, rn
);
1162 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1165 #define IWMMXT_OP(name) \
1166 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1168 iwmmxt_load_reg(cpu_V1, rn); \
1169 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1172 #define IWMMXT_OP_ENV(name) \
1173 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1175 iwmmxt_load_reg(cpu_V1, rn); \
1176 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1179 #define IWMMXT_OP_ENV_SIZE(name) \
1180 IWMMXT_OP_ENV(name##b) \
1181 IWMMXT_OP_ENV(name##w) \
1182 IWMMXT_OP_ENV(name##l)
1184 #define IWMMXT_OP_ENV1(name) \
1185 static inline void gen_op_iwmmxt_##name##_M0(void) \
1187 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1201 IWMMXT_OP_ENV_SIZE(unpackl
)
1202 IWMMXT_OP_ENV_SIZE(unpackh
)
1204 IWMMXT_OP_ENV1(unpacklub
)
1205 IWMMXT_OP_ENV1(unpackluw
)
1206 IWMMXT_OP_ENV1(unpacklul
)
1207 IWMMXT_OP_ENV1(unpackhub
)
1208 IWMMXT_OP_ENV1(unpackhuw
)
1209 IWMMXT_OP_ENV1(unpackhul
)
1210 IWMMXT_OP_ENV1(unpacklsb
)
1211 IWMMXT_OP_ENV1(unpacklsw
)
1212 IWMMXT_OP_ENV1(unpacklsl
)
1213 IWMMXT_OP_ENV1(unpackhsb
)
1214 IWMMXT_OP_ENV1(unpackhsw
)
1215 IWMMXT_OP_ENV1(unpackhsl
)
1217 IWMMXT_OP_ENV_SIZE(cmpeq
)
1218 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1219 IWMMXT_OP_ENV_SIZE(cmpgts
)
1221 IWMMXT_OP_ENV_SIZE(mins
)
1222 IWMMXT_OP_ENV_SIZE(minu
)
1223 IWMMXT_OP_ENV_SIZE(maxs
)
1224 IWMMXT_OP_ENV_SIZE(maxu
)
1226 IWMMXT_OP_ENV_SIZE(subn
)
1227 IWMMXT_OP_ENV_SIZE(addn
)
1228 IWMMXT_OP_ENV_SIZE(subu
)
1229 IWMMXT_OP_ENV_SIZE(addu
)
1230 IWMMXT_OP_ENV_SIZE(subs
)
1231 IWMMXT_OP_ENV_SIZE(adds
)
1233 IWMMXT_OP_ENV(avgb0
)
1234 IWMMXT_OP_ENV(avgb1
)
1235 IWMMXT_OP_ENV(avgw0
)
1236 IWMMXT_OP_ENV(avgw1
)
1240 IWMMXT_OP_ENV(packuw
)
1241 IWMMXT_OP_ENV(packul
)
1242 IWMMXT_OP_ENV(packuq
)
1243 IWMMXT_OP_ENV(packsw
)
1244 IWMMXT_OP_ENV(packsl
)
1245 IWMMXT_OP_ENV(packsq
)
1247 static void gen_op_iwmmxt_set_mup(void)
1250 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1251 tcg_gen_ori_i32(tmp
, tmp
, 2);
1252 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1255 static void gen_op_iwmmxt_set_cup(void)
1258 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1259 tcg_gen_ori_i32(tmp
, tmp
, 1);
1260 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1263 static void gen_op_iwmmxt_setpsr_nz(void)
1265 TCGv tmp
= new_tmp();
1266 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1267 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1270 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1272 iwmmxt_load_reg(cpu_V1
, rn
);
1273 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1274 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1277 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
, TCGv dest
)
1283 rd
= (insn
>> 16) & 0xf;
1284 tmp
= load_reg(s
, rd
);
1286 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1287 if (insn
& (1 << 24)) {
1289 if (insn
& (1 << 23))
1290 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1292 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1293 tcg_gen_mov_i32(dest
, tmp
);
1294 if (insn
& (1 << 21))
1295 store_reg(s
, rd
, tmp
);
1298 } else if (insn
& (1 << 21)) {
1300 tcg_gen_mov_i32(dest
, tmp
);
1301 if (insn
& (1 << 23))
1302 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1304 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1305 store_reg(s
, rd
, tmp
);
1306 } else if (!(insn
& (1 << 23)))
1311 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv dest
)
1313 int rd
= (insn
>> 0) & 0xf;
1316 if (insn
& (1 << 8)) {
1317 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1320 tmp
= iwmmxt_load_creg(rd
);
1324 iwmmxt_load_reg(cpu_V0
, rd
);
1325 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
1327 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1328 tcg_gen_mov_i32(dest
, tmp
);
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1335 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1338 int rdhi
, rdlo
, rd0
, rd1
, i
;
1340 TCGv tmp
, tmp2
, tmp3
;
1342 if ((insn
& 0x0e000e00) == 0x0c000000) {
1343 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1345 rdlo
= (insn
>> 12) & 0xf;
1346 rdhi
= (insn
>> 16) & 0xf;
1347 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1348 iwmmxt_load_reg(cpu_V0
, wrd
);
1349 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1350 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1351 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1352 } else { /* TMCRR */
1353 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1354 iwmmxt_store_reg(cpu_V0
, wrd
);
1355 gen_op_iwmmxt_set_mup();
1360 wrd
= (insn
>> 12) & 0xf;
1362 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1366 if (insn
& ARM_CP_RW_BIT
) {
1367 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1369 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1370 iwmmxt_store_creg(wrd
, tmp
);
1373 if (insn
& (1 << 8)) {
1374 if (insn
& (1 << 22)) { /* WLDRD */
1375 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1377 } else { /* WLDRW wRd */
1378 tmp
= gen_ld32(addr
, IS_USER(s
));
1381 if (insn
& (1 << 22)) { /* WLDRH */
1382 tmp
= gen_ld16u(addr
, IS_USER(s
));
1383 } else { /* WLDRB */
1384 tmp
= gen_ld8u(addr
, IS_USER(s
));
1388 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1391 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1394 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1395 tmp
= iwmmxt_load_creg(wrd
);
1396 gen_st32(tmp
, addr
, IS_USER(s
));
1398 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1400 if (insn
& (1 << 8)) {
1401 if (insn
& (1 << 22)) { /* WSTRD */
1403 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1404 } else { /* WSTRW wRd */
1405 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1406 gen_st32(tmp
, addr
, IS_USER(s
));
1409 if (insn
& (1 << 22)) { /* WSTRH */
1410 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1411 gen_st16(tmp
, addr
, IS_USER(s
));
1412 } else { /* WSTRB */
1413 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1414 gen_st8(tmp
, addr
, IS_USER(s
));
1423 if ((insn
& 0x0f000000) != 0x0e000000)
1426 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1427 case 0x000: /* WOR */
1428 wrd
= (insn
>> 12) & 0xf;
1429 rd0
= (insn
>> 0) & 0xf;
1430 rd1
= (insn
>> 16) & 0xf;
1431 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1432 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1433 gen_op_iwmmxt_setpsr_nz();
1434 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1435 gen_op_iwmmxt_set_mup();
1436 gen_op_iwmmxt_set_cup();
1438 case 0x011: /* TMCR */
1441 rd
= (insn
>> 12) & 0xf;
1442 wrd
= (insn
>> 16) & 0xf;
1444 case ARM_IWMMXT_wCID
:
1445 case ARM_IWMMXT_wCASF
:
1447 case ARM_IWMMXT_wCon
:
1448 gen_op_iwmmxt_set_cup();
1450 case ARM_IWMMXT_wCSSF
:
1451 tmp
= iwmmxt_load_creg(wrd
);
1452 tmp2
= load_reg(s
, rd
);
1453 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1455 iwmmxt_store_creg(wrd
, tmp
);
1457 case ARM_IWMMXT_wCGR0
:
1458 case ARM_IWMMXT_wCGR1
:
1459 case ARM_IWMMXT_wCGR2
:
1460 case ARM_IWMMXT_wCGR3
:
1461 gen_op_iwmmxt_set_cup();
1462 tmp
= load_reg(s
, rd
);
1463 iwmmxt_store_creg(wrd
, tmp
);
1469 case 0x100: /* WXOR */
1470 wrd
= (insn
>> 12) & 0xf;
1471 rd0
= (insn
>> 0) & 0xf;
1472 rd1
= (insn
>> 16) & 0xf;
1473 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1474 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1475 gen_op_iwmmxt_setpsr_nz();
1476 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1477 gen_op_iwmmxt_set_mup();
1478 gen_op_iwmmxt_set_cup();
1480 case 0x111: /* TMRC */
1483 rd
= (insn
>> 12) & 0xf;
1484 wrd
= (insn
>> 16) & 0xf;
1485 tmp
= iwmmxt_load_creg(wrd
);
1486 store_reg(s
, rd
, tmp
);
1488 case 0x300: /* WANDN */
1489 wrd
= (insn
>> 12) & 0xf;
1490 rd0
= (insn
>> 0) & 0xf;
1491 rd1
= (insn
>> 16) & 0xf;
1492 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1493 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1494 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1495 gen_op_iwmmxt_setpsr_nz();
1496 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1497 gen_op_iwmmxt_set_mup();
1498 gen_op_iwmmxt_set_cup();
1500 case 0x200: /* WAND */
1501 wrd
= (insn
>> 12) & 0xf;
1502 rd0
= (insn
>> 0) & 0xf;
1503 rd1
= (insn
>> 16) & 0xf;
1504 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1505 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1506 gen_op_iwmmxt_setpsr_nz();
1507 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1508 gen_op_iwmmxt_set_mup();
1509 gen_op_iwmmxt_set_cup();
1511 case 0x810: case 0xa10: /* WMADD */
1512 wrd
= (insn
>> 12) & 0xf;
1513 rd0
= (insn
>> 0) & 0xf;
1514 rd1
= (insn
>> 16) & 0xf;
1515 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1516 if (insn
& (1 << 21))
1517 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1519 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1520 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1521 gen_op_iwmmxt_set_mup();
1523 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1524 wrd
= (insn
>> 12) & 0xf;
1525 rd0
= (insn
>> 16) & 0xf;
1526 rd1
= (insn
>> 0) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1528 switch ((insn
>> 22) & 3) {
1530 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1533 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1536 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1541 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1545 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1546 wrd
= (insn
>> 12) & 0xf;
1547 rd0
= (insn
>> 16) & 0xf;
1548 rd1
= (insn
>> 0) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1550 switch ((insn
>> 22) & 3) {
1552 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1555 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1558 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1563 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1564 gen_op_iwmmxt_set_mup();
1565 gen_op_iwmmxt_set_cup();
1567 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1568 wrd
= (insn
>> 12) & 0xf;
1569 rd0
= (insn
>> 16) & 0xf;
1570 rd1
= (insn
>> 0) & 0xf;
1571 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1572 if (insn
& (1 << 22))
1573 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1575 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1576 if (!(insn
& (1 << 20)))
1577 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1578 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1579 gen_op_iwmmxt_set_mup();
1581 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1582 wrd
= (insn
>> 12) & 0xf;
1583 rd0
= (insn
>> 16) & 0xf;
1584 rd1
= (insn
>> 0) & 0xf;
1585 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1586 if (insn
& (1 << 21)) {
1587 if (insn
& (1 << 20))
1588 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1590 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1592 if (insn
& (1 << 20))
1593 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1595 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1597 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1598 gen_op_iwmmxt_set_mup();
1600 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1601 wrd
= (insn
>> 12) & 0xf;
1602 rd0
= (insn
>> 16) & 0xf;
1603 rd1
= (insn
>> 0) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1605 if (insn
& (1 << 21))
1606 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1608 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1609 if (!(insn
& (1 << 20))) {
1610 iwmmxt_load_reg(cpu_V1
, wrd
);
1611 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1614 gen_op_iwmmxt_set_mup();
1616 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1617 wrd
= (insn
>> 12) & 0xf;
1618 rd0
= (insn
>> 16) & 0xf;
1619 rd1
= (insn
>> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1621 switch ((insn
>> 22) & 3) {
1623 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1626 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1629 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1634 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1635 gen_op_iwmmxt_set_mup();
1636 gen_op_iwmmxt_set_cup();
1638 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1639 wrd
= (insn
>> 12) & 0xf;
1640 rd0
= (insn
>> 16) & 0xf;
1641 rd1
= (insn
>> 0) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1643 if (insn
& (1 << 22)) {
1644 if (insn
& (1 << 20))
1645 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1647 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1649 if (insn
& (1 << 20))
1650 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1652 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1654 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1655 gen_op_iwmmxt_set_mup();
1656 gen_op_iwmmxt_set_cup();
1658 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1659 wrd
= (insn
>> 12) & 0xf;
1660 rd0
= (insn
>> 16) & 0xf;
1661 rd1
= (insn
>> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1663 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1664 tcg_gen_andi_i32(tmp
, tmp
, 7);
1665 iwmmxt_load_reg(cpu_V1
, rd1
);
1666 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1668 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1669 gen_op_iwmmxt_set_mup();
1671 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1672 if (((insn
>> 6) & 3) == 3)
1674 rd
= (insn
>> 12) & 0xf;
1675 wrd
= (insn
>> 16) & 0xf;
1676 tmp
= load_reg(s
, rd
);
1677 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1678 switch ((insn
>> 6) & 3) {
1680 tmp2
= tcg_const_i32(0xff);
1681 tmp3
= tcg_const_i32((insn
& 7) << 3);
1684 tmp2
= tcg_const_i32(0xffff);
1685 tmp3
= tcg_const_i32((insn
& 3) << 4);
1688 tmp2
= tcg_const_i32(0xffffffff);
1689 tmp3
= tcg_const_i32((insn
& 1) << 5);
1695 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1696 tcg_temp_free(tmp3
);
1697 tcg_temp_free(tmp2
);
1699 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1700 gen_op_iwmmxt_set_mup();
1702 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1703 rd
= (insn
>> 12) & 0xf;
1704 wrd
= (insn
>> 16) & 0xf;
1705 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1707 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1709 switch ((insn
>> 22) & 3) {
1711 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1712 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1714 tcg_gen_ext8s_i32(tmp
, tmp
);
1716 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1720 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1721 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1723 tcg_gen_ext16s_i32(tmp
, tmp
);
1725 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1729 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1730 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1733 store_reg(s
, rd
, tmp
);
1735 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1736 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1738 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1739 switch ((insn
>> 22) & 3) {
1741 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1744 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1747 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1750 tcg_gen_shli_i32(tmp
, tmp
, 28);
1754 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1755 if (((insn
>> 6) & 3) == 3)
1757 rd
= (insn
>> 12) & 0xf;
1758 wrd
= (insn
>> 16) & 0xf;
1759 tmp
= load_reg(s
, rd
);
1760 switch ((insn
>> 6) & 3) {
1762 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1765 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1768 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1772 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1773 gen_op_iwmmxt_set_mup();
1775 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1776 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1778 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1780 tcg_gen_mov_i32(tmp2
, tmp
);
1781 switch ((insn
>> 22) & 3) {
1783 for (i
= 0; i
< 7; i
++) {
1784 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1785 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1789 for (i
= 0; i
< 3; i
++) {
1790 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1791 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1795 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1796 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1803 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1804 wrd
= (insn
>> 12) & 0xf;
1805 rd0
= (insn
>> 16) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1807 switch ((insn
>> 22) & 3) {
1809 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1812 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1815 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1820 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1821 gen_op_iwmmxt_set_mup();
1823 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1824 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1826 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1828 tcg_gen_mov_i32(tmp2
, tmp
);
1829 switch ((insn
>> 22) & 3) {
1831 for (i
= 0; i
< 7; i
++) {
1832 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1833 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1837 for (i
= 0; i
< 3; i
++) {
1838 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1839 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1843 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1844 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1851 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1852 rd
= (insn
>> 12) & 0xf;
1853 rd0
= (insn
>> 16) & 0xf;
1854 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1856 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1858 switch ((insn
>> 22) & 3) {
1860 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1863 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1866 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1869 store_reg(s
, rd
, tmp
);
1871 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1872 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1873 wrd
= (insn
>> 12) & 0xf;
1874 rd0
= (insn
>> 16) & 0xf;
1875 rd1
= (insn
>> 0) & 0xf;
1876 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1877 switch ((insn
>> 22) & 3) {
1879 if (insn
& (1 << 21))
1880 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1882 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1885 if (insn
& (1 << 21))
1886 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1888 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1891 if (insn
& (1 << 21))
1892 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1894 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1899 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1900 gen_op_iwmmxt_set_mup();
1901 gen_op_iwmmxt_set_cup();
1903 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1904 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1905 wrd
= (insn
>> 12) & 0xf;
1906 rd0
= (insn
>> 16) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1908 switch ((insn
>> 22) & 3) {
1910 if (insn
& (1 << 21))
1911 gen_op_iwmmxt_unpacklsb_M0();
1913 gen_op_iwmmxt_unpacklub_M0();
1916 if (insn
& (1 << 21))
1917 gen_op_iwmmxt_unpacklsw_M0();
1919 gen_op_iwmmxt_unpackluw_M0();
1922 if (insn
& (1 << 21))
1923 gen_op_iwmmxt_unpacklsl_M0();
1925 gen_op_iwmmxt_unpacklul_M0();
1930 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1931 gen_op_iwmmxt_set_mup();
1932 gen_op_iwmmxt_set_cup();
1934 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1935 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1936 wrd
= (insn
>> 12) & 0xf;
1937 rd0
= (insn
>> 16) & 0xf;
1938 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1939 switch ((insn
>> 22) & 3) {
1941 if (insn
& (1 << 21))
1942 gen_op_iwmmxt_unpackhsb_M0();
1944 gen_op_iwmmxt_unpackhub_M0();
1947 if (insn
& (1 << 21))
1948 gen_op_iwmmxt_unpackhsw_M0();
1950 gen_op_iwmmxt_unpackhuw_M0();
1953 if (insn
& (1 << 21))
1954 gen_op_iwmmxt_unpackhsl_M0();
1956 gen_op_iwmmxt_unpackhul_M0();
1961 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1962 gen_op_iwmmxt_set_mup();
1963 gen_op_iwmmxt_set_cup();
1965 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1966 case 0x214: case 0x614: case 0xa14: case 0xe14:
1967 if (((insn
>> 22) & 3) == 0)
1969 wrd
= (insn
>> 12) & 0xf;
1970 rd0
= (insn
>> 16) & 0xf;
1971 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1973 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
1977 switch ((insn
>> 22) & 3) {
1979 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1982 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1985 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
1989 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1990 gen_op_iwmmxt_set_mup();
1991 gen_op_iwmmxt_set_cup();
1993 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1994 case 0x014: case 0x414: case 0x814: case 0xc14:
1995 if (((insn
>> 22) & 3) == 0)
1997 wrd
= (insn
>> 12) & 0xf;
1998 rd0
= (insn
>> 16) & 0xf;
1999 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2001 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2005 switch ((insn
>> 22) & 3) {
2007 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2010 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2013 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2017 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2018 gen_op_iwmmxt_set_mup();
2019 gen_op_iwmmxt_set_cup();
2021 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2022 case 0x114: case 0x514: case 0x914: case 0xd14:
2023 if (((insn
>> 22) & 3) == 0)
2025 wrd
= (insn
>> 12) & 0xf;
2026 rd0
= (insn
>> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2029 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2033 switch ((insn
>> 22) & 3) {
2035 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2038 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2041 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2045 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2046 gen_op_iwmmxt_set_mup();
2047 gen_op_iwmmxt_set_cup();
2049 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2050 case 0x314: case 0x714: case 0xb14: case 0xf14:
2051 if (((insn
>> 22) & 3) == 0)
2053 wrd
= (insn
>> 12) & 0xf;
2054 rd0
= (insn
>> 16) & 0xf;
2055 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2057 switch ((insn
>> 22) & 3) {
2059 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2063 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2066 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2070 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2073 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2077 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2081 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2085 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2086 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2087 wrd
= (insn
>> 12) & 0xf;
2088 rd0
= (insn
>> 16) & 0xf;
2089 rd1
= (insn
>> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2091 switch ((insn
>> 22) & 3) {
2093 if (insn
& (1 << 21))
2094 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2096 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2099 if (insn
& (1 << 21))
2100 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2102 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2105 if (insn
& (1 << 21))
2106 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2108 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2113 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2114 gen_op_iwmmxt_set_mup();
2116 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2117 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2118 wrd
= (insn
>> 12) & 0xf;
2119 rd0
= (insn
>> 16) & 0xf;
2120 rd1
= (insn
>> 0) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2122 switch ((insn
>> 22) & 3) {
2124 if (insn
& (1 << 21))
2125 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2127 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2130 if (insn
& (1 << 21))
2131 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2133 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2136 if (insn
& (1 << 21))
2137 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2139 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2144 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2145 gen_op_iwmmxt_set_mup();
2147 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2148 case 0x402: case 0x502: case 0x602: case 0x702:
2149 wrd
= (insn
>> 12) & 0xf;
2150 rd0
= (insn
>> 16) & 0xf;
2151 rd1
= (insn
>> 0) & 0xf;
2152 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2153 tmp
= tcg_const_i32((insn
>> 20) & 3);
2154 iwmmxt_load_reg(cpu_V1
, rd1
);
2155 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2157 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2158 gen_op_iwmmxt_set_mup();
2160 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2161 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2162 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2163 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2164 wrd
= (insn
>> 12) & 0xf;
2165 rd0
= (insn
>> 16) & 0xf;
2166 rd1
= (insn
>> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2168 switch ((insn
>> 20) & 0xf) {
2170 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2173 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2176 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2179 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2182 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2185 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2188 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2191 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2194 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2199 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2200 gen_op_iwmmxt_set_mup();
2201 gen_op_iwmmxt_set_cup();
2203 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2204 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2205 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2206 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2207 wrd
= (insn
>> 12) & 0xf;
2208 rd0
= (insn
>> 16) & 0xf;
2209 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2210 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2211 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2213 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2214 gen_op_iwmmxt_set_mup();
2215 gen_op_iwmmxt_set_cup();
2217 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2218 case 0x418: case 0x518: case 0x618: case 0x718:
2219 case 0x818: case 0x918: case 0xa18: case 0xb18:
2220 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2221 wrd
= (insn
>> 12) & 0xf;
2222 rd0
= (insn
>> 16) & 0xf;
2223 rd1
= (insn
>> 0) & 0xf;
2224 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2225 switch ((insn
>> 20) & 0xf) {
2227 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2230 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2233 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2236 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2239 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2242 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2245 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2248 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2251 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2256 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2257 gen_op_iwmmxt_set_mup();
2258 gen_op_iwmmxt_set_cup();
2260 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2261 case 0x408: case 0x508: case 0x608: case 0x708:
2262 case 0x808: case 0x908: case 0xa08: case 0xb08:
2263 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2264 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2266 wrd
= (insn
>> 12) & 0xf;
2267 rd0
= (insn
>> 16) & 0xf;
2268 rd1
= (insn
>> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2270 switch ((insn
>> 22) & 3) {
2272 if (insn
& (1 << 21))
2273 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2275 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2278 if (insn
& (1 << 21))
2279 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2281 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2284 if (insn
& (1 << 21))
2285 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2287 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2290 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2294 case 0x201: case 0x203: case 0x205: case 0x207:
2295 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2296 case 0x211: case 0x213: case 0x215: case 0x217:
2297 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2298 wrd
= (insn
>> 5) & 0xf;
2299 rd0
= (insn
>> 12) & 0xf;
2300 rd1
= (insn
>> 0) & 0xf;
2301 if (rd0
== 0xf || rd1
== 0xf)
2303 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2304 tmp
= load_reg(s
, rd0
);
2305 tmp2
= load_reg(s
, rd1
);
2306 switch ((insn
>> 16) & 0xf) {
2307 case 0x0: /* TMIA */
2308 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2310 case 0x8: /* TMIAPH */
2311 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2313 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2314 if (insn
& (1 << 16))
2315 tcg_gen_shri_i32(tmp
, tmp
, 16);
2316 if (insn
& (1 << 17))
2317 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2318 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2327 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2328 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
2339 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2341 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2344 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2345 /* Multiply with Internal Accumulate Format */
2346 rd0
= (insn
>> 12) & 0xf;
2348 acc
= (insn
>> 5) & 7;
2353 tmp
= load_reg(s
, rd0
);
2354 tmp2
= load_reg(s
, rd1
);
2355 switch ((insn
>> 16) & 0xf) {
2357 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2359 case 0x8: /* MIAPH */
2360 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2362 case 0xc: /* MIABB */
2363 case 0xd: /* MIABT */
2364 case 0xe: /* MIATB */
2365 case 0xf: /* MIATT */
2366 if (insn
& (1 << 16))
2367 tcg_gen_shri_i32(tmp
, tmp
, 16);
2368 if (insn
& (1 << 17))
2369 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2370 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2378 gen_op_iwmmxt_movq_wRn_M0(acc
);
2382 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2383 /* Internal Accumulator Access Format */
2384 rdhi
= (insn
>> 16) & 0xf;
2385 rdlo
= (insn
>> 12) & 0xf;
2391 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2392 iwmmxt_load_reg(cpu_V0
, acc
);
2393 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2394 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2395 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2396 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2398 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2399 iwmmxt_store_reg(cpu_V0
, acc
);
2407 /* Disassemble system coprocessor instruction. Return nonzero if
2408 instruction is not defined. */
2409 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2412 uint32_t rd
= (insn
>> 12) & 0xf;
2413 uint32_t cp
= (insn
>> 8) & 0xf;
2418 if (insn
& ARM_CP_RW_BIT
) {
2419 if (!env
->cp
[cp
].cp_read
)
2421 gen_set_pc_im(s
->pc
);
2423 tmp2
= tcg_const_i32(insn
);
2424 gen_helper_get_cp(tmp
, cpu_env
, tmp2
);
2425 tcg_temp_free(tmp2
);
2426 store_reg(s
, rd
, tmp
);
2428 if (!env
->cp
[cp
].cp_write
)
2430 gen_set_pc_im(s
->pc
);
2431 tmp
= load_reg(s
, rd
);
2432 tmp2
= tcg_const_i32(insn
);
2433 gen_helper_set_cp(cpu_env
, tmp2
, tmp
);
2434 tcg_temp_free(tmp2
);
2440 static int cp15_user_ok(uint32_t insn
)
2442 int cpn
= (insn
>> 16) & 0xf;
2443 int cpm
= insn
& 0xf;
2444 int op
= ((insn
>> 5) & 7) | ((insn
>> 18) & 0x38);
2446 if (cpn
== 13 && cpm
== 0) {
2448 if (op
== 2 || (op
== 3 && (insn
& ARM_CP_RW_BIT
)))
2452 /* ISB, DSB, DMB. */
2453 if ((cpm
== 5 && op
== 4)
2454 || (cpm
== 10 && (op
== 4 || op
== 5)))
2460 static int cp15_tls_load_store(CPUState
*env
, DisasContext
*s
, uint32_t insn
, uint32_t rd
)
2463 int cpn
= (insn
>> 16) & 0xf;
2464 int cpm
= insn
& 0xf;
2465 int op
= ((insn
>> 5) & 7) | ((insn
>> 18) & 0x38);
2467 if (!arm_feature(env
, ARM_FEATURE_V6K
))
2470 if (!(cpn
== 13 && cpm
== 0))
2473 if (insn
& ARM_CP_RW_BIT
) {
2476 tmp
= load_cpu_field(cp15
.c13_tls1
);
2479 tmp
= load_cpu_field(cp15
.c13_tls2
);
2482 tmp
= load_cpu_field(cp15
.c13_tls3
);
2487 store_reg(s
, rd
, tmp
);
2490 tmp
= load_reg(s
, rd
);
2493 store_cpu_field(tmp
, cp15
.c13_tls1
);
2496 store_cpu_field(tmp
, cp15
.c13_tls2
);
2499 store_cpu_field(tmp
, cp15
.c13_tls3
);
2509 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2510 instruction is not defined. */
2511 static int disas_cp15_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2516 /* M profile cores use memory mapped registers instead of cp15. */
2517 if (arm_feature(env
, ARM_FEATURE_M
))
2520 if ((insn
& (1 << 25)) == 0) {
2521 if (insn
& (1 << 20)) {
2525 /* mcrr. Used for block cache operations, so implement as no-op. */
2528 if ((insn
& (1 << 4)) == 0) {
2532 if (IS_USER(s
) && !cp15_user_ok(insn
)) {
2535 if ((insn
& 0x0fff0fff) == 0x0e070f90
2536 || (insn
& 0x0fff0fff) == 0x0e070f58) {
2537 /* Wait for interrupt. */
2538 gen_set_pc_im(s
->pc
);
2539 s
->is_jmp
= DISAS_WFI
;
2542 rd
= (insn
>> 12) & 0xf;
2544 if (cp15_tls_load_store(env
, s
, insn
, rd
))
2547 tmp2
= tcg_const_i32(insn
);
2548 if (insn
& ARM_CP_RW_BIT
) {
2550 gen_helper_get_cp15(tmp
, cpu_env
, tmp2
);
2551 /* If the destination register is r15 then sets condition codes. */
2553 store_reg(s
, rd
, tmp
);
2557 tmp
= load_reg(s
, rd
);
2558 gen_helper_set_cp15(cpu_env
, tmp2
, tmp
);
2560 /* Normally we would always end the TB here, but Linux
2561 * arch/arm/mach-pxa/sleep.S expects two instructions following
2562 * an MMU enable to execute from cache. Imitate this behaviour. */
2563 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) ||
2564 (insn
& 0x0fff0fff) != 0x0e010f10)
2567 tcg_temp_free_i32(tmp2
);
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
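
/* Illustrative sketch, not used by the translator: what VFP_SREG() above
 * computes for the "N" operand once the field positions are substituted in.
 * The function name is hypothetical and only makes the bit layout explicit:
 * bits [19:16] of the instruction supply the upper four bits of the
 * single-precision register number and bit 7 supplies the low bit. */
static inline int vfp_sreg_n_example(uint32_t insn)
{
    return ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
}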
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}
static inline int vfp_enabled(CPUState *env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
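
/* Illustrative sketch, not part of the translator: the byte duplication that
 * the TCG sequence in gen_neon_dup_u8() performs, written on a plain host
 * value.  The helper name is hypothetical; it assumes only <stdint.h>. */
static inline uint32_t neon_dup_u8_example(uint32_t var, int shift)
{
    var = (var >> shift) & 0xff;  /* select the byte */
    var |= var << 8;              /* replicate into lanes 0 and 1 */
    var |= var << 16;             /* replicate into lanes 2 and 3 */
    return var;
}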
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
2652 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2655 if (!vfp_enabled(env
)) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2659 rn
= (insn
>> 16) & 0xf;
2660 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2661 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2664 dp
= ((insn
& 0xf00) == 0xb00);
2665 switch ((insn
>> 24) & 0xf) {
2667 if (insn
& (1 << 4)) {
2668 /* single register transfer */
2669 rd
= (insn
>> 12) & 0xf;
2674 VFP_DREG_N(rn
, insn
);
2677 if (insn
& 0x00c00060
2678 && !arm_feature(env
, ARM_FEATURE_NEON
))
2681 pass
= (insn
>> 21) & 1;
2682 if (insn
& (1 << 22)) {
2684 offset
= ((insn
>> 5) & 3) * 8;
2685 } else if (insn
& (1 << 5)) {
2687 offset
= (insn
& (1 << 6)) ? 16 : 0;
2692 if (insn
& ARM_CP_RW_BIT
) {
2694 tmp
= neon_load_reg(rn
, pass
);
2698 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2699 if (insn
& (1 << 23))
2705 if (insn
& (1 << 23)) {
2707 tcg_gen_shri_i32(tmp
, tmp
, 16);
2713 tcg_gen_sari_i32(tmp
, tmp
, 16);
2722 store_reg(s
, rd
, tmp
);
2725 tmp
= load_reg(s
, rd
);
2726 if (insn
& (1 << 23)) {
2729 gen_neon_dup_u8(tmp
, 0);
2730 } else if (size
== 1) {
2731 gen_neon_dup_low16(tmp
);
2733 for (n
= 0; n
<= pass
* 2; n
++) {
2735 tcg_gen_mov_i32(tmp2
, tmp
);
2736 neon_store_reg(rn
, n
, tmp2
);
2738 neon_store_reg(rn
, n
, tmp
);
2743 tmp2
= neon_load_reg(rn
, pass
);
2744 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2748 tmp2
= neon_load_reg(rn
, pass
);
2749 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2755 neon_store_reg(rn
, pass
, tmp
);
2759 if ((insn
& 0x6f) != 0x00)
2761 rn
= VFP_SREG_N(insn
);
2762 if (insn
& ARM_CP_RW_BIT
) {
2764 if (insn
& (1 << 21)) {
2765 /* system register */
                /* VFP2 allows access to FPSID from userspace.
                   VFP3 restricts all id registers to privileged
                   accesses.  */
                if (IS_USER(s)
                    && arm_feature(env, ARM_FEATURE_VFP3))
                    return 1;
2776 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2781 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2783 case ARM_VFP_FPINST
:
2784 case ARM_VFP_FPINST2
:
2785 /* Not present in VFP3. */
2787 || arm_feature(env
, ARM_FEATURE_VFP3
))
2789 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2793 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2794 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2797 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2803 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2805 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2811 gen_mov_F0_vreg(0, rn
);
2812 tmp
= gen_vfp_mrs();
2815 /* Set the 4 flag bits in the CPSR. */
2819 store_reg(s
, rd
, tmp
);
2823 tmp
= load_reg(s
, rd
);
2824 if (insn
& (1 << 21)) {
2826 /* system register */
2831 /* Writes are ignored. */
2834 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2844 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2847 case ARM_VFP_FPINST
:
2848 case ARM_VFP_FPINST2
:
2849 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2856 gen_mov_vreg_F0(0, rn
);
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2867 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2869 /* rn is register number */
2870 VFP_DREG_N(rn
, insn
);
2873 if (op
== 15 && (rn
== 15 || rn
> 17)) {
2874 /* Integer or single precision destination. */
2875 rd
= VFP_SREG_D(insn
);
2877 VFP_DREG_D(rd
, insn
);
2880 if (op
== 15 && (rn
== 16 || rn
== 17)) {
2881 /* Integer source. */
2882 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2884 VFP_DREG_M(rm
, insn
);
2887 rn
= VFP_SREG_N(insn
);
2888 if (op
== 15 && rn
== 15) {
2889 /* Double precision destination. */
2890 VFP_DREG_D(rd
, insn
);
2892 rd
= VFP_SREG_D(insn
);
2894 rm
= VFP_SREG_M(insn
);
2897 veclen
= env
->vfp
.vec_len
;
2898 if (op
== 15 && rn
> 3)
2901 /* Shut up compiler warnings. */
2912 /* Figure out what type of vector operation this is. */
2913 if ((rd
& bank_mask
) == 0) {
2918 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
2920 delta_d
= env
->vfp
.vec_stride
+ 1;
2922 if ((rm
& bank_mask
) == 0) {
2923 /* mixed scalar/vector */
2932 /* Load the initial operands. */
2937 /* Integer source */
2938 gen_mov_F0_vreg(0, rm
);
2943 gen_mov_F0_vreg(dp
, rd
);
2944 gen_mov_F1_vreg(dp
, rm
);
2948 /* Compare with zero */
2949 gen_mov_F0_vreg(dp
, rd
);
2960 /* Source and destination the same. */
2961 gen_mov_F0_vreg(dp
, rd
);
2964 /* One source operand. */
2965 gen_mov_F0_vreg(dp
, rm
);
2969 /* Two source operands. */
2970 gen_mov_F0_vreg(dp
, rn
);
2971 gen_mov_F1_vreg(dp
, rm
);
2975 /* Perform the calculation. */
2977 case 0: /* mac: fd + (fn * fm) */
2979 gen_mov_F1_vreg(dp
, rd
);
2982 case 1: /* nmac: fd - (fn * fm) */
2985 gen_mov_F1_vreg(dp
, rd
);
2988 case 2: /* msc: -fd + (fn * fm) */
2990 gen_mov_F1_vreg(dp
, rd
);
2993 case 3: /* nmsc: -fd - (fn * fm) */
2996 gen_mov_F1_vreg(dp
, rd
);
2999 case 4: /* mul: fn * fm */
3002 case 5: /* nmul: -(fn * fm) */
3006 case 6: /* add: fn + fm */
3009 case 7: /* sub: fn - fm */
3012 case 8: /* div: fn / fm */
3015 case 14: /* fconst */
3016 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3019 n
= (insn
<< 12) & 0x80000000;
3020 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3027 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3034 tcg_gen_movi_i32(cpu_F0s
, n
);
3037 case 15: /* extension space */
3051 case 4: /* vcvtb.f32.f16 */
3052 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3054 tmp
= gen_vfp_mrs();
3055 tcg_gen_ext16u_i32(tmp
, tmp
);
3056 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3059 case 5: /* vcvtt.f32.f16 */
3060 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3062 tmp
= gen_vfp_mrs();
3063 tcg_gen_shri_i32(tmp
, tmp
, 16);
3064 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3067 case 6: /* vcvtb.f16.f32 */
3068 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3071 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3072 gen_mov_F0_vreg(0, rd
);
3073 tmp2
= gen_vfp_mrs();
3074 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3075 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3079 case 7: /* vcvtt.f16.f32 */
3080 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
3083 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3084 tcg_gen_shli_i32(tmp
, tmp
, 16);
3085 gen_mov_F0_vreg(0, rd
);
3086 tmp2
= gen_vfp_mrs();
3087 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3088 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3101 case 11: /* cmpez */
3105 case 15: /* single<->double conversion */
3107 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3109 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3111 case 16: /* fuito */
3114 case 17: /* fsito */
3117 case 20: /* fshto */
3118 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3120 gen_vfp_shto(dp
, 16 - rm
);
3122 case 21: /* fslto */
3123 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3125 gen_vfp_slto(dp
, 32 - rm
);
3127 case 22: /* fuhto */
3128 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3130 gen_vfp_uhto(dp
, 16 - rm
);
3132 case 23: /* fulto */
3133 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3135 gen_vfp_ulto(dp
, 32 - rm
);
3137 case 24: /* ftoui */
3140 case 25: /* ftouiz */
3143 case 26: /* ftosi */
3146 case 27: /* ftosiz */
3149 case 28: /* ftosh */
3150 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3152 gen_vfp_tosh(dp
, 16 - rm
);
3154 case 29: /* ftosl */
3155 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3157 gen_vfp_tosl(dp
, 32 - rm
);
3159 case 30: /* ftouh */
3160 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3162 gen_vfp_touh(dp
, 16 - rm
);
3164 case 31: /* ftoul */
3165 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3167 gen_vfp_toul(dp
, 32 - rm
);
3169 default: /* undefined */
3170 printf ("rn:%d\n", rn
);
3174 default: /* undefined */
3175 printf ("op:%d\n", op
);
3179 /* Write back the result. */
3180 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3181 ; /* Comparison, do nothing. */
3182 else if (op
== 15 && rn
> 17)
3183 /* Integer result. */
3184 gen_mov_vreg_F0(0, rd
);
3185 else if (op
== 15 && rn
== 15)
3187 gen_mov_vreg_F0(!dp
, rd
);
3189 gen_mov_vreg_F0(dp
, rd
);
3191 /* break out of the loop if we have finished */
3195 if (op
== 15 && delta_m
== 0) {
3196 /* single source one-many */
3198 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3200 gen_mov_vreg_F0(dp
, rd
);
3204 /* Setup the next operands. */
3206 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3210 /* One source operand. */
3211 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3213 gen_mov_F0_vreg(dp
, rm
);
3215 /* Two source operands. */
3216 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3218 gen_mov_F0_vreg(dp
, rn
);
3220 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3222 gen_mov_F1_vreg(dp
, rm
);
3230 if (dp
&& (insn
& 0x03e00000) == 0x00400000) {
3231 /* two-register transfer */
3232 rn
= (insn
>> 16) & 0xf;
3233 rd
= (insn
>> 12) & 0xf;
3235 VFP_DREG_M(rm
, insn
);
3237 rm
= VFP_SREG_M(insn
);
3240 if (insn
& ARM_CP_RW_BIT
) {
3243 gen_mov_F0_vreg(0, rm
* 2);
3244 tmp
= gen_vfp_mrs();
3245 store_reg(s
, rd
, tmp
);
3246 gen_mov_F0_vreg(0, rm
* 2 + 1);
3247 tmp
= gen_vfp_mrs();
3248 store_reg(s
, rn
, tmp
);
3250 gen_mov_F0_vreg(0, rm
);
3251 tmp
= gen_vfp_mrs();
3252 store_reg(s
, rn
, tmp
);
3253 gen_mov_F0_vreg(0, rm
+ 1);
3254 tmp
= gen_vfp_mrs();
3255 store_reg(s
, rd
, tmp
);
3260 tmp
= load_reg(s
, rd
);
3262 gen_mov_vreg_F0(0, rm
* 2);
3263 tmp
= load_reg(s
, rn
);
3265 gen_mov_vreg_F0(0, rm
* 2 + 1);
3267 tmp
= load_reg(s
, rn
);
3269 gen_mov_vreg_F0(0, rm
);
3270 tmp
= load_reg(s
, rd
);
3272 gen_mov_vreg_F0(0, rm
+ 1);
3277 rn
= (insn
>> 16) & 0xf;
3279 VFP_DREG_D(rd
, insn
);
3281 rd
= VFP_SREG_D(insn
);
3282 if (s
->thumb
&& rn
== 15) {
3284 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3286 addr
= load_reg(s
, rn
);
3288 if ((insn
& 0x01200000) == 0x01000000) {
3289 /* Single load/store */
3290 offset
= (insn
& 0xff) << 2;
3291 if ((insn
& (1 << 23)) == 0)
3293 tcg_gen_addi_i32(addr
, addr
, offset
);
3294 if (insn
& (1 << 20)) {
3295 gen_vfp_ld(s
, dp
, addr
);
3296 gen_mov_vreg_F0(dp
, rd
);
3298 gen_mov_F0_vreg(dp
, rd
);
3299 gen_vfp_st(s
, dp
, addr
);
3303 /* load/store multiple */
3305 n
= (insn
>> 1) & 0x7f;
3309 if (insn
& (1 << 24)) /* pre-decrement */
3310 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3316 for (i
= 0; i
< n
; i
++) {
3317 if (insn
& ARM_CP_RW_BIT
) {
3319 gen_vfp_ld(s
, dp
, addr
);
3320 gen_mov_vreg_F0(dp
, rd
+ i
);
3323 gen_mov_F0_vreg(dp
, rd
+ i
);
3324 gen_vfp_st(s
, dp
, addr
);
3326 tcg_gen_addi_i32(addr
, addr
, offset
);
3328 if (insn
& (1 << 21)) {
3330 if (insn
& (1 << 24))
3331 offset
= -offset
* n
;
3332 else if (dp
&& (insn
& 1))
3338 tcg_gen_addi_i32(addr
, addr
, offset
);
3339 store_reg(s
, rn
, addr
);
3347 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
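
/* Illustrative sketch, not part of the translator: the value an SMULxy-style
 * multiply produces, computed on host integers.  x and y select the top (1)
 * or bottom (0) halfword of each operand, and the halves are sign-extended
 * before the 32x32 multiply, mirroring what gen_mulxy() emits above.  The
 * function name is hypothetical; it assumes only <stdint.h>. */
static inline int32_t smulxy_example(int32_t t0, int32_t t1, int x, int y)
{
    int32_t a = (int16_t)(x ? ((uint32_t)t0 >> 16) : (uint32_t)t0);
    int32_t b = (int16_t)(y ? ((uint32_t)t1 >> 16) : (uint32_t)t1);
    return a * b;
}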
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
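
/* Illustrative only: in the A32 MSR (register form) encoding the four
 * "field" enable bits c/x/s/f sit at insn[19:16]; that nibble is what is
 * passed to msr_mask() above as 'flags'.  This hypothetical helper just
 * makes the mapping explicit, e.g. a CPSR_fc write (flags == 0x9) gives a
 * mask of 0xff0000ff before the feature-dependent bits are removed. */
static inline int msr_fields_example(uint32_t insn)
{
    return (insn >> 16) & 0xf;  /* bit 0 = c, 1 = x, 2 = s, 3 = f */
}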
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}
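
/* Illustrative sketch, not taken from neon_helper.c: a lane-wise u8 add on
 * four bytes packed in a host uint32_t, i.e. the operation that
 * gen_helper_neon_add_u8 provides at run time.  Classic SWAR formulation:
 * add the low 7 bits of each lane, then patch the top bit of each lane back
 * in so no carry crosses a lane boundary.  The function name is
 * hypothetical. */
static inline uint32_t neon_add_u8x4_example(uint32_t a, uint32_t b)
{
    uint32_t low = (a & 0x7f7f7f7fu) + (b & 0x7f7f7f7fu);
    return low ^ ((a ^ b) & 0x80808080u);
}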
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(tmp, tmp, tmp2); break; \
    case 1: gen_helper_neon_##name##_u8(tmp, tmp, tmp2); break; \
    case 2: gen_helper_neon_##name##_s16(tmp, tmp, tmp2); break; \
    case 3: gen_helper_neon_##name##_u16(tmp, tmp, tmp2); break; \
    case 4: gen_helper_neon_##name##_s32(tmp, tmp, tmp2); break; \
    case 5: gen_helper_neon_##name##_u32(tmp, tmp, tmp2); break; \
    default: return 1; \
    }} while (0)
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    dead_tmp(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg >> 1, reg & 1);
    } else {
        tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
        if (reg & 1) {
            gen_neon_dup_low16(tmp);
        } else {
            gen_neon_dup_high16(tmp);
        }
    }
    return tmp;
}
3610 static void gen_neon_unzip_u8(TCGv t0
, TCGv t1
)
3618 tcg_gen_andi_i32(rd
, t0
, 0xff);
3619 tcg_gen_shri_i32(tmp
, t0
, 8);
3620 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3621 tcg_gen_or_i32(rd
, rd
, tmp
);
3622 tcg_gen_shli_i32(tmp
, t1
, 16);
3623 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3624 tcg_gen_or_i32(rd
, rd
, tmp
);
3625 tcg_gen_shli_i32(tmp
, t1
, 8);
3626 tcg_gen_andi_i32(tmp
, tmp
, 0xff000000);
3627 tcg_gen_or_i32(rd
, rd
, tmp
);
3629 tcg_gen_shri_i32(rm
, t0
, 8);
3630 tcg_gen_andi_i32(rm
, rm
, 0xff);
3631 tcg_gen_shri_i32(tmp
, t0
, 16);
3632 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3633 tcg_gen_or_i32(rm
, rm
, tmp
);
3634 tcg_gen_shli_i32(tmp
, t1
, 8);
3635 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3636 tcg_gen_or_i32(rm
, rm
, tmp
);
3637 tcg_gen_andi_i32(tmp
, t1
, 0xff000000);
3638 tcg_gen_or_i32(t1
, rm
, tmp
);
3639 tcg_gen_mov_i32(t0
, rd
);
3646 static void gen_neon_zip_u8(TCGv t0
, TCGv t1
)
3654 tcg_gen_andi_i32(rd
, t0
, 0xff);
3655 tcg_gen_shli_i32(tmp
, t1
, 8);
3656 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3657 tcg_gen_or_i32(rd
, rd
, tmp
);
3658 tcg_gen_shli_i32(tmp
, t0
, 16);
3659 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3660 tcg_gen_or_i32(rd
, rd
, tmp
);
3661 tcg_gen_shli_i32(tmp
, t1
, 24);
3662 tcg_gen_andi_i32(tmp
, tmp
, 0xff000000);
3663 tcg_gen_or_i32(rd
, rd
, tmp
);
3665 tcg_gen_andi_i32(rm
, t1
, 0xff000000);
3666 tcg_gen_shri_i32(tmp
, t0
, 8);
3667 tcg_gen_andi_i32(tmp
, tmp
, 0xff0000);
3668 tcg_gen_or_i32(rm
, rm
, tmp
);
3669 tcg_gen_shri_i32(tmp
, t1
, 8);
3670 tcg_gen_andi_i32(tmp
, tmp
, 0xff00);
3671 tcg_gen_or_i32(rm
, rm
, tmp
);
3672 tcg_gen_shri_i32(tmp
, t0
, 16);
3673 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
3674 tcg_gen_or_i32(t1
, rm
, tmp
);
3675 tcg_gen_mov_i32(t0
, rd
);
static void gen_neon_zip_u16(TCGv t0, TCGv t1)
{
    TCGv tmp, tmp2;

    tmp = new_tmp();
    tmp2 = new_tmp();

    tcg_gen_andi_i32(tmp, t0, 0xffff);
    tcg_gen_shli_i32(tmp2, t1, 16);
    tcg_gen_or_i32(tmp, tmp, tmp2);
    tcg_gen_andi_i32(t1, t1, 0xffff0000);
    tcg_gen_shri_i32(tmp2, t0, 16);
    tcg_gen_or_i32(t1, t1, tmp2);
    tcg_gen_mov_i32(t0, tmp);

    dead_tmp(tmp2);
    dead_tmp(tmp);
}
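
/* Illustrative sketch, not part of the translator: what the TCG sequence in
 * gen_neon_zip_u16() computes, expressed on plain host values.  t0 ends up
 * with the two low halfwords interleaved and t1 with the two high halfwords.
 * The helper name is hypothetical. */
static inline void neon_zip_u16_example(uint32_t *t0, uint32_t *t1)
{
    uint32_t a = *t0, b = *t1;
    *t0 = (a & 0xffff) | (b << 16);      /* { b.low, a.low }  */
    *t1 = (b & 0xffff0000) | (a >> 16);  /* { b.high, a.high } */
}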
3701 static void gen_neon_unzip(int reg
, int q
, int tmp
, int size
)
3706 for (n
= 0; n
< q
+ 1; n
+= 2) {
3707 t0
= neon_load_reg(reg
, n
);
3708 t1
= neon_load_reg(reg
, n
+ 1);
3710 case 0: gen_neon_unzip_u8(t0
, t1
); break;
3711 case 1: gen_neon_zip_u16(t0
, t1
); break; /* zip and unzip are the same. */
3712 case 2: /* no-op */; break;
3715 neon_store_scratch(tmp
+ n
, t0
);
3716 neon_store_scratch(tmp
+ n
+ 1, t1
);
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
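
/* Illustrative sketch, not part of the translator: the 16-bit transpose step
 * gen_neon_trn_u16() emits, on plain host values.  The helper name is
 * hypothetical and mirrors the TCG sequence above exactly. */
static inline void neon_trn_u16_example(uint32_t *t0, uint32_t *t1)
{
    uint32_t a = *t0, b = *t1;
    *t0 = (a << 16) | (b & 0xffff);
    *t1 = (a & 0xffff0000) | (b >> 16);
}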
3766 } neon_ls_element_type
[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
3801 if (!vfp_enabled(env
))
3803 VFP_DREG_D(rd
, insn
);
3804 rn
= (insn
>> 16) & 0xf;
3806 load
= (insn
& (1 << 21)) != 0;
3808 if ((insn
& (1 << 23)) == 0) {
3809 /* Load store all elements. */
3810 op
= (insn
>> 8) & 0xf;
3811 size
= (insn
>> 6) & 3;
3814 nregs
= neon_ls_element_type
[op
].nregs
;
3815 interleave
= neon_ls_element_type
[op
].interleave
;
3816 spacing
= neon_ls_element_type
[op
].spacing
;
3817 if (size
== 3 && (interleave
| spacing
) != 1)
3819 load_reg_var(s
, addr
, rn
);
3820 stride
= (1 << size
) * interleave
;
3821 for (reg
= 0; reg
< nregs
; reg
++) {
3822 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3823 load_reg_var(s
, addr
, rn
);
3824 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3825 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3826 load_reg_var(s
, addr
, rn
);
3827 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3831 tmp64
= gen_ld64(addr
, IS_USER(s
));
3832 neon_store_reg64(tmp64
, rd
);
3833 tcg_temp_free_i64(tmp64
);
3835 tmp64
= tcg_temp_new_i64();
3836 neon_load_reg64(tmp64
, rd
);
3837 gen_st64(tmp64
, addr
, IS_USER(s
));
3839 tcg_gen_addi_i32(addr
, addr
, stride
);
3841 for (pass
= 0; pass
< 2; pass
++) {
3844 tmp
= gen_ld32(addr
, IS_USER(s
));
3845 neon_store_reg(rd
, pass
, tmp
);
3847 tmp
= neon_load_reg(rd
, pass
);
3848 gen_st32(tmp
, addr
, IS_USER(s
));
3850 tcg_gen_addi_i32(addr
, addr
, stride
);
3851 } else if (size
== 1) {
3853 tmp
= gen_ld16u(addr
, IS_USER(s
));
3854 tcg_gen_addi_i32(addr
, addr
, stride
);
3855 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3856 tcg_gen_addi_i32(addr
, addr
, stride
);
3857 gen_bfi(tmp
, tmp
, tmp2
, 16, 0xffff);
3859 neon_store_reg(rd
, pass
, tmp
);
3861 tmp
= neon_load_reg(rd
, pass
);
3863 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3864 gen_st16(tmp
, addr
, IS_USER(s
));
3865 tcg_gen_addi_i32(addr
, addr
, stride
);
3866 gen_st16(tmp2
, addr
, IS_USER(s
));
3867 tcg_gen_addi_i32(addr
, addr
, stride
);
3869 } else /* size == 0 */ {
3872 for (n
= 0; n
< 4; n
++) {
3873 tmp
= gen_ld8u(addr
, IS_USER(s
));
3874 tcg_gen_addi_i32(addr
, addr
, stride
);
3878 gen_bfi(tmp2
, tmp2
, tmp
, n
* 8, 0xff);
3882 neon_store_reg(rd
, pass
, tmp2
);
3884 tmp2
= neon_load_reg(rd
, pass
);
3885 for (n
= 0; n
< 4; n
++) {
3888 tcg_gen_mov_i32(tmp
, tmp2
);
3890 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3892 gen_st8(tmp
, addr
, IS_USER(s
));
3893 tcg_gen_addi_i32(addr
, addr
, stride
);
3904 size
= (insn
>> 10) & 3;
3906 /* Load single element to all lanes. */
3909 size
= (insn
>> 6) & 3;
3910 nregs
= ((insn
>> 8) & 3) + 1;
3911 stride
= (insn
& (1 << 5)) ? 2 : 1;
3912 load_reg_var(s
, addr
, rn
);
3913 for (reg
= 0; reg
< nregs
; reg
++) {
3916 tmp
= gen_ld8u(addr
, IS_USER(s
));
3917 gen_neon_dup_u8(tmp
, 0);
3920 tmp
= gen_ld16u(addr
, IS_USER(s
));
3921 gen_neon_dup_low16(tmp
);
3924 tmp
= gen_ld32(addr
, IS_USER(s
));
3928 default: /* Avoid compiler warnings. */
3931 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3933 tcg_gen_mov_i32(tmp2
, tmp
);
3934 neon_store_reg(rd
, 0, tmp2
);
3935 neon_store_reg(rd
, 1, tmp
);
3938 stride
= (1 << size
) * nregs
;
3940 /* Single element. */
3941 pass
= (insn
>> 7) & 1;
3944 shift
= ((insn
>> 5) & 3) * 8;
3948 shift
= ((insn
>> 6) & 1) * 16;
3949 stride
= (insn
& (1 << 5)) ? 2 : 1;
3953 stride
= (insn
& (1 << 6)) ? 2 : 1;
3958 nregs
= ((insn
>> 8) & 3) + 1;
3959 load_reg_var(s
, addr
, rn
);
3960 for (reg
= 0; reg
< nregs
; reg
++) {
3964 tmp
= gen_ld8u(addr
, IS_USER(s
));
3967 tmp
= gen_ld16u(addr
, IS_USER(s
));
3970 tmp
= gen_ld32(addr
, IS_USER(s
));
3972 default: /* Avoid compiler warnings. */
3976 tmp2
= neon_load_reg(rd
, pass
);
3977 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3980 neon_store_reg(rd
, pass
, tmp
);
3981 } else { /* Store */
3982 tmp
= neon_load_reg(rd
, pass
);
3984 tcg_gen_shri_i32(tmp
, tmp
, shift
);
3987 gen_st8(tmp
, addr
, IS_USER(s
));
3990 gen_st16(tmp
, addr
, IS_USER(s
));
3993 gen_st32(tmp
, addr
, IS_USER(s
));
3998 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4000 stride
= nregs
* (1 << size
);
4007 base
= load_reg(s
, rn
);
4009 tcg_gen_addi_i32(base
, base
, stride
);
4012 index
= load_reg(s
, rm
);
4013 tcg_gen_add_i32(base
, base
, index
);
4016 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
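
/* Illustrative sketch, not part of the translator: the bitwise select that
 * gen_neon_bsl() implements with TCG ops, on plain host values.  Each result
 * bit comes from t where the control word c has a 1 and from f where it has
 * a 0.  The function name is hypothetical. */
static inline uint32_t neon_bsl_example(uint32_t t, uint32_t f, uint32_t c)
{
    return (t & c) | (f & ~c);
}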
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
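
/* Illustrative sketch: the per-lane clamping that a signed 32->16 saturating
 * narrow performs conceptually.  The real work is done at run time by the
 * neon_narrow_sat_* helpers, which also record saturation in the QC flag;
 * this hypothetical function only shows the arithmetic. */
static inline int16_t narrow_sat_s32_to_s16_example(int32_t x)
{
    if (x > 32767)
        return 32767;
    if (x < -32768)
        return -32768;
    return (int16_t)x;
}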
4059 static inline void gen_neon_shift_narrow(int size
, TCGv var
, TCGv shift
,
4065 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4066 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4071 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4072 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4079 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4080 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4085 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4086 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
}
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4194 if (!vfp_enabled(env
))
4196 q
= (insn
& (1 << 6)) != 0;
4197 u
= (insn
>> 24) & 1;
4198 VFP_DREG_D(rd
, insn
);
4199 VFP_DREG_N(rn
, insn
);
4200 VFP_DREG_M(rm
, insn
);
4201 size
= (insn
>> 20) & 3;
4202 if ((insn
& (1 << 23)) == 0) {
4203 /* Three register same length. */
4204 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4205 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4206 || op
== 10 || op
== 11 || op
== 16)) {
4207 /* 64-bit element instructions. */
4208 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4209 neon_load_reg64(cpu_V0
, rn
+ pass
);
4210 neon_load_reg64(cpu_V1
, rm
+ pass
);
4214 gen_helper_neon_add_saturate_u64(CPU_V001
);
4216 gen_helper_neon_add_saturate_s64(CPU_V001
);
4221 gen_helper_neon_sub_saturate_u64(CPU_V001
);
4223 gen_helper_neon_sub_saturate_s64(CPU_V001
);
4228 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4230 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4235 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4238 gen_helper_neon_qshl_s64(cpu_V1
, cpu_env
,
4242 case 10: /* VRSHL */
4244 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4246 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4249 case 11: /* VQRSHL */
4251 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4254 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4260 tcg_gen_sub_i64(CPU_V001
);
4262 tcg_gen_add_i64(CPU_V001
);
4268 neon_store_reg64(cpu_V0
, rd
+ pass
);
4275 case 10: /* VRSHL */
4276 case 11: /* VQRSHL */
4279 /* Shift instruction operands are reversed. */
4286 case 20: /* VPMAX */
4287 case 21: /* VPMIN */
4288 case 23: /* VPADD */
4291 case 26: /* VPADD (float) */
4292 pairwise
= (u
&& size
< 2);
4294 case 30: /* VPMIN/VPMAX (float) */
4302 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4311 tmp
= neon_load_reg(rn
, n
);
4312 tmp2
= neon_load_reg(rn
, n
+ 1);
4314 tmp
= neon_load_reg(rm
, n
);
4315 tmp2
= neon_load_reg(rm
, n
+ 1);
4319 tmp
= neon_load_reg(rn
, pass
);
4320 tmp2
= neon_load_reg(rm
, pass
);
4324 GEN_NEON_INTEGER_OP(hadd
);
4327 GEN_NEON_INTEGER_OP_ENV(qadd
);
4329 case 2: /* VRHADD */
4330 GEN_NEON_INTEGER_OP(rhadd
);
4332 case 3: /* Logic ops. */
4333 switch ((u
<< 2) | size
) {
4335 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4338 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4341 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4344 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4347 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4350 tmp3
= neon_load_reg(rd
, pass
);
4351 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4355 tmp3
= neon_load_reg(rd
, pass
);
4356 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4360 tmp3
= neon_load_reg(rd
, pass
);
4361 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4367 GEN_NEON_INTEGER_OP(hsub
);
4370 GEN_NEON_INTEGER_OP_ENV(qsub
);
4373 GEN_NEON_INTEGER_OP(cgt
);
4376 GEN_NEON_INTEGER_OP(cge
);
4379 GEN_NEON_INTEGER_OP(shl
);
4382 GEN_NEON_INTEGER_OP_ENV(qshl
);
4384 case 10: /* VRSHL */
4385 GEN_NEON_INTEGER_OP(rshl
);
4387 case 11: /* VQRSHL */
4388 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4391 GEN_NEON_INTEGER_OP(max
);
4394 GEN_NEON_INTEGER_OP(min
);
4397 GEN_NEON_INTEGER_OP(abd
);
4400 GEN_NEON_INTEGER_OP(abd
);
4402 tmp2
= neon_load_reg(rd
, pass
);
4403 gen_neon_add(size
, tmp
, tmp2
);
4406 if (!u
) { /* VADD */
4407 if (gen_neon_add(size
, tmp
, tmp2
))
4411 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4412 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4413 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4419 if (!u
) { /* VTST */
4421 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4422 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4423 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4428 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4429 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4430 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4435 case 18: /* Multiply. */
4437 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4438 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4439 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4443 tmp2
= neon_load_reg(rd
, pass
);
4445 gen_neon_rsb(size
, tmp
, tmp2
);
4447 gen_neon_add(size
, tmp
, tmp2
);
4451 if (u
) { /* polynomial */
4452 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4453 } else { /* Integer */
4455 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4456 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4457 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4462 case 20: /* VPMAX */
4463 GEN_NEON_INTEGER_OP(pmax
);
4465 case 21: /* VPMIN */
4466 GEN_NEON_INTEGER_OP(pmin
);
            case 22: /* Multiply high.  */
4469 if (!u
) { /* VQDMULH */
4471 case 1: gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4472 case 2: gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
                } else { /* VQRDMULH */
4477 case 1: gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
); break;
4478 case 2: gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
); break;
4483 case 23: /* VPADD */
4487 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4488 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4489 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
            case 26: /* Floating point arithmetic.  */
4494 switch ((u
<< 2) | size
) {
4496 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4499 gen_helper_neon_sub_f32(tmp
, tmp
, tmp2
);
4502 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4505 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
);
4511 case 27: /* Float multiply. */
4512 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
4515 tmp2
= neon_load_reg(rd
, pass
);
4517 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
4519 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
4523 case 28: /* Float compare. */
4525 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
4528 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
4530 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
4533 case 29: /* Float compare absolute. */
4537 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
);
4539 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
);
4541 case 30: /* Float min/max. */
4543 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
);
4545 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
);
4549 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4551 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4558 /* Save the result. For elementwise operations we can put it
4559 straight into the destination register. For pairwise operations
4560 we have to be careful to avoid clobbering the source operands. */
4561 if (pairwise
&& rd
== rm
) {
4562 neon_store_scratch(pass
, tmp
);
4564 neon_store_reg(rd
, pass
, tmp
);
4568 if (pairwise
&& rd
== rm
) {
4569 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4570 tmp
= neon_load_scratch(pass
);
4571 neon_store_reg(rd
, pass
, tmp
);
4574 /* End of 3 register same size operations. */
4575 } else if (insn
& (1 << 4)) {
4576 if ((insn
& 0x00380080) != 0) {
4577 /* Two registers and shift. */
4578 op
= (insn
>> 8) & 0xf;
4579 if (insn
& (1 << 7)) {
4584 while ((insn
& (1 << (size
+ 19))) == 0)
4587 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
                /* To avoid excessive duplication of ops we implement shift
                   by immediate using the variable shift operations.  */
4591 /* Shift by immediate:
4592 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4593 /* Right shifts are encoded as N - shift, where N is the
4594 element size in bits. */
4596 shift
= shift
- (1 << (size
+ 3));
4604 imm
= (uint8_t) shift
;
4609 imm
= (uint16_t) shift
;
4620 for (pass
= 0; pass
< count
; pass
++) {
4622 neon_load_reg64(cpu_V0
, rm
+ pass
);
4623 tcg_gen_movi_i64(cpu_V1
, imm
);
4628 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4630 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4635 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4637 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4642 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4644 case 5: /* VSHL, VSLI */
4645 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4649 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4651 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4653 case 7: /* VQSHLU */
4654 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4657 if (op
== 1 || op
== 3) {
4659 neon_load_reg64(cpu_V0
, rd
+ pass
);
4660 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4661 } else if (op
== 4 || (op
== 5 && u
)) {
4663 cpu_abort(env
, "VS[LR]I.64 not implemented");
4665 neon_store_reg64(cpu_V0
, rd
+ pass
);
4666 } else { /* size < 3 */
4667 /* Operands in T0 and T1. */
4668 tmp
= neon_load_reg(rm
, pass
);
4670 tcg_gen_movi_i32(tmp2
, imm
);
4674 GEN_NEON_INTEGER_OP(shl
);
4678 GEN_NEON_INTEGER_OP(rshl
);
4683 GEN_NEON_INTEGER_OP(shl
);
4685 case 5: /* VSHL, VSLI */
4687 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
4688 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
4689 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
4694 GEN_NEON_INTEGER_OP_ENV(qshl
);
4696 case 7: /* VQSHLU */
4698 case 0: gen_helper_neon_qshl_u8(tmp
, cpu_env
, tmp
, tmp2
); break;
4699 case 1: gen_helper_neon_qshl_u16(tmp
, cpu_env
, tmp
, tmp2
); break;
4700 case 2: gen_helper_neon_qshl_u32(tmp
, cpu_env
, tmp
, tmp2
); break;
4707 if (op
== 1 || op
== 3) {
4709 tmp2
= neon_load_reg(rd
, pass
);
4710 gen_neon_add(size
, tmp2
, tmp
);
4712 } else if (op
== 4 || (op
== 5 && u
)) {
4717 mask
= 0xff >> -shift
;
4719 mask
= (uint8_t)(0xff << shift
);
4725 mask
= 0xffff >> -shift
;
4727 mask
= (uint16_t)(0xffff << shift
);
4731 if (shift
< -31 || shift
> 31) {
4735 mask
= 0xffffffffu
>> -shift
;
4737 mask
= 0xffffffffu
<< shift
;
4743 tmp2
= neon_load_reg(rd
, pass
);
4744 tcg_gen_andi_i32(tmp
, tmp
, mask
);
4745 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
4746 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4749 neon_store_reg(rd
, pass
, tmp
);
4752 } else if (op
< 10) {
4753 /* Shift by immediate and narrow:
4754 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4755 shift
= shift
- (1 << (size
+ 3));
4759 imm
= (uint16_t)shift
;
4761 tmp2
= tcg_const_i32(imm
);
4762 TCGV_UNUSED_I64(tmp64
);
4765 imm
= (uint32_t)shift
;
4766 tmp2
= tcg_const_i32(imm
);
4767 TCGV_UNUSED_I64(tmp64
);
4770 tmp64
= tcg_const_i64(shift
);
4777 for (pass
= 0; pass
< 2; pass
++) {
4779 neon_load_reg64(cpu_V0
, rm
+ pass
);
4782 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, tmp64
);
4784 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, tmp64
);
4787 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, tmp64
);
4789 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, tmp64
);
4792 tmp
= neon_load_reg(rm
+ pass
, 0);
4793 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
, u
);
4794 tmp3
= neon_load_reg(rm
+ pass
, 1);
4795 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
, u
);
4796 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4801 if (op
== 8 && !u
) {
4802 gen_neon_narrow(size
- 1, tmp
, cpu_V0
);
4805 gen_neon_narrow_sats(size
- 1, tmp
, cpu_V0
);
4807 gen_neon_narrow_satu(size
- 1, tmp
, cpu_V0
);
4809 neon_store_reg(rd
, pass
, tmp
);
4812 tcg_temp_free_i64(tmp64
);
4816 } else if (op
== 10) {
4820 tmp
= neon_load_reg(rm
, 0);
4821 tmp2
= neon_load_reg(rm
, 1);
4822 for (pass
= 0; pass
< 2; pass
++) {
4826 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4829 /* The shift is less than the width of the source
4830 type, so we can just shift the whole register. */
4831 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
4832 if (size
< 2 || !u
) {
4835 imm
= (0xffu
>> (8 - shift
));
4838 imm
= 0xffff >> (16 - shift
);
4840 imm64
= imm
| (((uint64_t)imm
) << 32);
4841 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, imm64
);
4844 neon_store_reg64(cpu_V0
, rd
+ pass
);
4846 } else if (op
== 15 || op
== 16) {
4847 /* VCVT fixed-point. */
4848 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4849 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4852 gen_vfp_ulto(0, shift
);
4854 gen_vfp_slto(0, shift
);
4857 gen_vfp_toul(0, shift
);
4859 gen_vfp_tosl(0, shift
);
4861 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4866 } else { /* (insn & 0x00380080) == 0 */
4869 op
= (insn
>> 8) & 0xf;
4870 /* One register and immediate. */
4871 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4872 invert
= (insn
& (1 << 5)) != 0;
4890 imm
= (imm
<< 8) | (imm
<< 24);
4893 imm
= (imm
<< 8) | 0xff;
4896 imm
= (imm
<< 16) | 0xffff;
4899 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
4904 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
4905 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
4911 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4912 if (op
& 1 && op
< 12) {
4913 tmp
= neon_load_reg(rd
, pass
);
4915 /* The immediate value has already been inverted, so
4917 tcg_gen_andi_i32(tmp
, tmp
, imm
);
4919 tcg_gen_ori_i32(tmp
, tmp
, imm
);
4924 if (op
== 14 && invert
) {
4927 for (n
= 0; n
< 4; n
++) {
4928 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
4929 val
|= 0xff << (n
* 8);
4931 tcg_gen_movi_i32(tmp
, val
);
4933 tcg_gen_movi_i32(tmp
, imm
);
4936 neon_store_reg(rd
, pass
, tmp
);
4939 } else { /* (insn & 0x00800010 == 0x00800000) */
4941 op
= (insn
>> 8) & 0xf;
4942 if ((insn
& (1 << 6)) == 0) {
4943 /* Three registers of different lengths. */
4947 /* prewiden, src1_wide, src2_wide */
4948 static const int neon_3reg_wide
[16][3] = {
4949 {1, 0, 0}, /* VADDL */
4950 {1, 1, 0}, /* VADDW */
4951 {1, 0, 0}, /* VSUBL */
4952 {1, 1, 0}, /* VSUBW */
4953 {0, 1, 1}, /* VADDHN */
4954 {0, 0, 0}, /* VABAL */
4955 {0, 1, 1}, /* VSUBHN */
4956 {0, 0, 0}, /* VABDL */
4957 {0, 0, 0}, /* VMLAL */
4958 {0, 0, 0}, /* VQDMLAL */
4959 {0, 0, 0}, /* VMLSL */
4960 {0, 0, 0}, /* VQDMLSL */
4961 {0, 0, 0}, /* Integer VMULL */
4962 {0, 0, 0}, /* VQDMULL */
4963 {0, 0, 0} /* Polynomial VMULL */
4966 prewiden
= neon_3reg_wide
[op
][0];
4967 src1_wide
= neon_3reg_wide
[op
][1];
4968 src2_wide
= neon_3reg_wide
[op
][2];
4970 if (size
== 0 && (op
== 9 || op
== 11 || op
== 13))
4973 /* Avoid overlapping operands. Wide source operands are
4974 always aligned so will never overlap with wide
4975 destinations in problematic ways. */
4976 if (rd
== rm
&& !src2_wide
) {
4977 tmp
= neon_load_reg(rm
, 1);
4978 neon_store_scratch(2, tmp
);
4979 } else if (rd
== rn
&& !src1_wide
) {
4980 tmp
= neon_load_reg(rn
, 1);
4981 neon_store_scratch(2, tmp
);
4984 for (pass
= 0; pass
< 2; pass
++) {
4986 neon_load_reg64(cpu_V0
, rn
+ pass
);
4989 if (pass
== 1 && rd
== rn
) {
4990 tmp
= neon_load_scratch(2);
4992 tmp
= neon_load_reg(rn
, pass
);
4995 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4999 neon_load_reg64(cpu_V1
, rm
+ pass
);
5002 if (pass
== 1 && rd
== rm
) {
5003 tmp2
= neon_load_scratch(2);
5005 tmp2
= neon_load_reg(rm
, pass
);
5008 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5012 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5013 gen_neon_addl(size
);
5015 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5016 gen_neon_subl(size
);
5018 case 5: case 7: /* VABAL, VABDL */
5019 switch ((size
<< 1) | u
) {
5021 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5024 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5027 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5030 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5033 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5036 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5043 case 8: case 9: case 10: case 11: case 12: case 13:
5044 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5045 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5049 case 14: /* Polynomial VMULL */
5050 cpu_abort(env
, "Polynomial VMULL not implemented");
5052 default: /* 15 is RESERVED. */
5055 if (op
== 5 || op
== 13 || (op
>= 8 && op
<= 11)) {
5057 if (op
== 10 || op
== 11) {
5058 gen_neon_negl(cpu_V0
, size
);
5062 neon_load_reg64(cpu_V1
, rd
+ pass
);
5066 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5067 gen_neon_addl(size
);
5069 case 9: case 11: /* VQDMLAL, VQDMLSL */
5070 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5071 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5074 case 13: /* VQDMULL */
5075 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5080 neon_store_reg64(cpu_V0
, rd
+ pass
);
5081 } else if (op
== 4 || op
== 6) {
5082 /* Narrowing operation. */
5087 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5090 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5093 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5094 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5101 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5104 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5107 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5108 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5109 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5117 neon_store_reg(rd
, 0, tmp3
);
5118 neon_store_reg(rd
, 1, tmp
);
5121 /* Write back the result. */
5122 neon_store_reg64(cpu_V0
, rd
+ pass
);
5126 /* Two registers and a scalar. */
5128 case 0: /* Integer VMLA scalar */
5129 case 1: /* Float VMLA scalar */
5130 case 4: /* Integer VMLS scalar */
5131 case 5: /* Floating point VMLS scalar */
5132 case 8: /* Integer VMUL scalar */
5133 case 9: /* Floating point VMUL scalar */
5134 case 12: /* VQDMULH scalar */
5135 case 13: /* VQRDMULH scalar */
5136 tmp
= neon_get_scalar(size
, rm
);
5137 neon_store_scratch(0, tmp
);
5138 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5139 tmp
= neon_load_scratch(0);
5140 tmp2
= neon_load_reg(rn
, pass
);
5143 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5145 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5147 } else if (op
== 13) {
5149 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5151 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5153 } else if (op
& 1) {
5154 gen_helper_neon_mul_f32(tmp
, tmp
, tmp2
);
5157 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5158 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5159 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5166 tmp2
= neon_load_reg(rd
, pass
);
5169 gen_neon_add(size
, tmp
, tmp2
);
5172 gen_helper_neon_add_f32(tmp
, tmp
, tmp2
);
5175 gen_neon_rsb(size
, tmp
, tmp2
);
5178 gen_helper_neon_sub_f32(tmp
, tmp2
, tmp
);
5185 neon_store_reg(rd
, pass
, tmp
);
            case 2: /* VMLAL scalar */
5189 case 3: /* VQDMLAL scalar */
5190 case 6: /* VMLSL scalar */
5191 case 7: /* VQDMLSL scalar */
5192 case 10: /* VMULL scalar */
5193 case 11: /* VQDMULL scalar */
5194 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5197 tmp2
= neon_get_scalar(size
, rm
);
5198 tmp3
= neon_load_reg(rn
, 1);
5200 for (pass
= 0; pass
< 2; pass
++) {
5202 tmp
= neon_load_reg(rn
, 0);
5206 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5208 if (op
== 6 || op
== 7) {
5209 gen_neon_negl(cpu_V0
, size
);
5212 neon_load_reg64(cpu_V1
, rd
+ pass
);
5216 gen_neon_addl(size
);
5219 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5220 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5226 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5231 neon_store_reg64(cpu_V0
, rd
+ pass
);
5237 default: /* 14 and 15 are RESERVED */
5241 } else { /* size == 3 */
5244 imm
= (insn
>> 8) & 0xf;
5251 neon_load_reg64(cpu_V0
, rn
);
5253 neon_load_reg64(cpu_V1
, rn
+ 1);
5255 } else if (imm
== 8) {
5256 neon_load_reg64(cpu_V0
, rn
+ 1);
5258 neon_load_reg64(cpu_V1
, rm
);
5261 tmp64
= tcg_temp_new_i64();
5263 neon_load_reg64(cpu_V0
, rn
);
5264 neon_load_reg64(tmp64
, rn
+ 1);
5266 neon_load_reg64(cpu_V0
, rn
+ 1);
5267 neon_load_reg64(tmp64
, rm
);
5269 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5270 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5271 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5273 neon_load_reg64(cpu_V1
, rm
);
5275 neon_load_reg64(cpu_V1
, rm
+ 1);
5278 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5279 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5280 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5281 tcg_temp_free_i64(tmp64
);
5284 neon_load_reg64(cpu_V0
, rn
);
5285 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5286 neon_load_reg64(cpu_V1
, rm
);
5287 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5288 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5290 neon_store_reg64(cpu_V0
, rd
);
5292 neon_store_reg64(cpu_V1
, rd
+ 1);
5294 } else if ((insn
& (1 << 11)) == 0) {
5295 /* Two register misc. */
5296 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5297 size
= (insn
>> 18) & 3;
5299 case 0: /* VREV64 */
5302 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5303 tmp
= neon_load_reg(rm
, pass
* 2);
5304 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5306 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5307 case 1: gen_swap_half(tmp
); break;
5308 case 2: /* no-op */ break;
5311 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5313 neon_store_reg(rd
, pass
* 2, tmp2
);
5316 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5317 case 1: gen_swap_half(tmp2
); break;
5320 neon_store_reg(rd
, pass
* 2, tmp2
);
5324 case 4: case 5: /* VPADDL */
5325 case 12: case 13: /* VPADAL */
5328 for (pass
= 0; pass
< q
+ 1; pass
++) {
5329 tmp
= neon_load_reg(rm
, pass
* 2);
5330 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5331 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5332 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5334 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5335 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5336 case 2: tcg_gen_add_i64(CPU_V001
); break;
5341 neon_load_reg64(cpu_V1
, rd
+ pass
);
5342 gen_neon_addl(size
);
5344 neon_store_reg64(cpu_V0
, rd
+ pass
);
5349 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5350 tmp
= neon_load_reg(rm
, n
);
5351 tmp2
= neon_load_reg(rd
, n
+ 1);
5352 neon_store_reg(rm
, n
, tmp2
);
5353 neon_store_reg(rd
, n
+ 1, tmp
);
5361 Rd A3 A2 A1 A0 B2 B0 A2 A0
5362 Rm B3 B2 B1 B0 B3 B1 A3 A1
5366 gen_neon_unzip(rd
, q
, 0, size
);
5367 gen_neon_unzip(rm
, q
, 4, size
);
5369 static int unzip_order_q
[8] =
5370 {0, 2, 4, 6, 1, 3, 5, 7};
5371 for (n
= 0; n
< 8; n
++) {
5372 int reg
= (n
< 4) ? rd
: rm
;
5373 tmp
= neon_load_scratch(unzip_order_q
[n
]);
5374 neon_store_reg(reg
, n
% 4, tmp
);
5377 static int unzip_order
[4] =
5379 for (n
= 0; n
< 4; n
++) {
5380 int reg
= (n
< 2) ? rd
: rm
;
5381 tmp
= neon_load_scratch(unzip_order
[n
]);
5382 neon_store_reg(reg
, n
% 2, tmp
);
5388 Rd A3 A2 A1 A0 B1 A1 B0 A0
5389 Rm B3 B2 B1 B0 B3 A3 B2 A2
5393 count
= (q
? 4 : 2);
5394 for (n
= 0; n
< count
; n
++) {
5395 tmp
= neon_load_reg(rd
, n
);
5396 tmp2
= neon_load_reg(rd
, n
);
5398 case 0: gen_neon_zip_u8(tmp
, tmp2
); break;
5399 case 1: gen_neon_zip_u16(tmp
, tmp2
); break;
5400 case 2: /* no-op */; break;
5403 neon_store_scratch(n
* 2, tmp
);
5404 neon_store_scratch(n
* 2 + 1, tmp2
);
5406 for (n
= 0; n
< count
* 2; n
++) {
5407 int reg
= (n
< count
) ? rd
: rm
;
5408 tmp
= neon_load_scratch(n
);
5409 neon_store_reg(reg
, n
% count
, tmp
);
5412 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5416 for (pass
= 0; pass
< 2; pass
++) {
5417 neon_load_reg64(cpu_V0
, rm
+ pass
);
5419 if (op
== 36 && q
== 0) {
5420 gen_neon_narrow(size
, tmp
, cpu_V0
);
5422 gen_neon_narrow_satu(size
, tmp
, cpu_V0
);
5424 gen_neon_narrow_sats(size
, tmp
, cpu_V0
);
5429 neon_store_reg(rd
, 0, tmp2
);
5430 neon_store_reg(rd
, 1, tmp
);
5434 case 38: /* VSHLL */
5437 tmp
= neon_load_reg(rm
, 0);
5438 tmp2
= neon_load_reg(rm
, 1);
5439 for (pass
= 0; pass
< 2; pass
++) {
5442 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5443 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5444 neon_store_reg64(cpu_V0
, rd
+ pass
);
5447 case 44: /* VCVT.F16.F32 */
5448 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5452 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5453 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5454 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5455 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5456 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5457 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5458 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5459 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5460 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5461 neon_store_reg(rd
, 0, tmp2
);
5463 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5464 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5465 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5466 neon_store_reg(rd
, 1, tmp2
);
5469 case 46: /* VCVT.F32.F16 */
5470 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5473 tmp
= neon_load_reg(rm
, 0);
5474 tmp2
= neon_load_reg(rm
, 1);
5475 tcg_gen_ext16u_i32(tmp3
, tmp
);
5476 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5477 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5478 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5479 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5480 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5482 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5483 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5484 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5485 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5486 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5487 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (op == 30 || op == 31 || op >= 58) {
                tcg_gen_ld_f32(cpu_F0s, cpu_env,
                               neon_reg_offset(rm, pass));
            } else {
                tmp = neon_load_reg(rm, pass);
            }
            case 1: /* VREV32 */
                case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                case 1: gen_swap_half(tmp); break;
            case 2: /* VREV16 */
                case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                case 2: gen_helper_clz(tmp, tmp); break;
                gen_helper_neon_cnt_u8(tmp, tmp);
                tcg_gen_not_i32(tmp, tmp);
            case 14: /* VQABS */
                case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
                case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
                case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
            case 15: /* VQNEG */
                case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
                case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
                case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
            case 16: case 19: /* VCGT #0, VCLE #0 */
                tmp2 = tcg_const_i32(0);
                case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                tcg_temp_free(tmp2);
                tcg_gen_not_i32(tmp, tmp);
            case 17: case 20: /* VCGE #0, VCLT #0 */
                tmp2 = tcg_const_i32(0);
                case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                tcg_temp_free(tmp2);
                tcg_gen_not_i32(tmp, tmp);
            case 18: /* VCEQ #0 */
                tmp2 = tcg_const_i32(0);
                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                tcg_temp_free(tmp2);
                case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                case 2: tcg_gen_abs_i32(tmp, tmp); break;
                tmp2 = tcg_const_i32(0);
                gen_neon_rsb(size, tmp, tmp2);
                tcg_temp_free(tmp2);
            case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                tmp2 = tcg_const_i32(0);
                gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                tcg_temp_free(tmp2);
                tcg_gen_not_i32(tmp, tmp);
            case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                tmp2 = tcg_const_i32(0);
                gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                tcg_temp_free(tmp2);
                tcg_gen_not_i32(tmp, tmp);
            case 26: /* Float VCEQ #0 */
                tmp2 = tcg_const_i32(0);
                gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                tcg_temp_free(tmp2);
            case 30: /* Float VABS */
            case 31: /* Float VNEG */
                tmp2 = neon_load_reg(rd, pass);
                neon_store_reg(rm, pass, tmp2);
                tmp2 = neon_load_reg(rd, pass);
                case 0: gen_neon_trn_u8(tmp, tmp2); break;
                case 1: gen_neon_trn_u16(tmp, tmp2); break;
                neon_store_reg(rm, pass, tmp2);
            case 56: /* Integer VRECPE */
                gen_helper_recpe_u32(tmp, tmp, cpu_env);
            case 57: /* Integer VRSQRTE */
                gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
            case 58: /* Float VRECPE */
                gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
            case 59: /* Float VRSQRTE */
                gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
            case 60: /* VCVT.F32.S32 */
            case 61: /* VCVT.F32.U32 */
            case 62: /* VCVT.S32.F32 */
            case 63: /* VCVT.U32.F32 */
            /* Reserved: 21, 29, 39-56 */
            if (op == 30 || op == 31 || op >= 58) {
                tcg_gen_st_f32(cpu_F0s, cpu_env,
                               neon_reg_offset(rd, pass));
            } else {
                neon_store_reg(rd, pass, tmp);
            }
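            /* Write-back: single-precision results (op 30, 31 and 58 and up)
               are produced in cpu_F0s and stored with tcg_gen_st_f32(); all
               other two-register-misc results are held in the 32-bit temp and
               written back with neon_store_reg(). */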
    } else if ((insn & (1 << 10)) == 0) {
        n = ((insn >> 5) & 0x18) + 8;
        if (insn & (1 << 6)) {
            tmp = neon_load_reg(rd, 0);
            tcg_gen_movi_i32(tmp, 0);
        tmp2 = neon_load_reg(rm, 0);
        tmp4 = tcg_const_i32(rn);
        tmp5 = tcg_const_i32(n);
        gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
        if (insn & (1 << 6)) {
            tmp = neon_load_reg(rd, 1);
            tcg_gen_movi_i32(tmp, 0);
        tmp3 = neon_load_reg(rm, 1);
        gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
        tcg_temp_free_i32(tmp5);
        tcg_temp_free_i32(tmp4);
        neon_store_reg(rd, 0, tmp2);
        neon_store_reg(rd, 1, tmp3);
    } else if ((insn & 0x380) == 0) {
        if (insn & (1 << 19)) {
            tmp = neon_load_reg(rm, 1);
        } else {
            tmp = neon_load_reg(rm, 0);
        }
        if (insn & (1 << 16)) {
            gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
        } else if (insn & (1 << 17)) {
            if ((insn >> 18) & 1)
                gen_neon_dup_high16(tmp);
            else
                gen_neon_dup_low16(tmp);
        }
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            tcg_gen_mov_i32(tmp2, tmp);
            neon_store_reg(rd, pass, tmp2);
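        /* VDUP (scalar): the selected element has been replicated across the
           whole 32-bit temp by gen_neon_dup_u8/high16/low16 above, and the
           loop copies that value into every pass of the destination
           register. */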
static int disas_cp14_read(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
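/* The cp14 accessors above and below implement only the ThumbEE registers:
   TEECR at op1=6, CRn=0 and TEEHBR at op1=6, CRn=1.  TEEHBR accesses from
   user mode are additionally rejected when bit 0 of TEECR is set.  Anything
   else is logged as an unknown cp14 access. */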
static int disas_cp14_write(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
static int disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return disas_iwmmxt_insn(env, s, insn);
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        return disas_dsp_insn(env, s, insn);
    return disas_vfp_insn(env, s, insn);
    /* Coprocessors 7-15 are architecturally reserved by ARM.
       Unfortunately Intel decided to ignore this. */
    if (arm_feature(env, ARM_FEATURE_XSCALE))
    if (insn & (1 << 20))
        return disas_cp14_read(env, s, insn);
    return disas_cp14_write(env, s, insn);
    return disas_cp15_insn(env, s, insn);
    /* Unknown coprocessor.  See if the board has hooked it. */
    return disas_cp_insn(env, s, insn);
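/* Coprocessor dispatch, as reflected by the calls above: cp0/cp1 go to the
   iWMMXt or XScale DSP decoders when those features are present, cp10/cp11
   (VFP) go to disas_vfp_insn(), cp14 accesses are split into reads and writes
   on bit 20 of the instruction, cp15 goes to disas_cp15_insn(), and any other
   coprocessor is offered to a board hook via disas_cp_insn(). */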
/* Store a 64-bit value to a register pair.  Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    /* Load value and extend to 64 bits. */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* Set N and Z flags from a 64-bit value. */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = new_tmp();
    gen_helper_logicq_cc(tmp, val);
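/* Illustrative sketch (not compiled) of how the helpers above are combined by
   the decoders later in this file for a long multiply-accumulate such as
   UMLAL; "set_flags" is a stand-in for the S-bit test done by the caller. */
#if 0
    tmp = load_reg(s, rm);
    tmp2 = load_reg(s, rs);
    tmp64 = gen_mulu_i64_i32(tmp, tmp2);      /* 32 x 32 -> 64 product */
    gen_addq(s, tmp64, rlow, rhigh);          /* add the existing rhigh:rlow */
    if (set_flags)
        gen_logicq_cc(tmp64);                 /* N and Z from the 64-bit result */
    gen_storeq_reg(s, rlow, rhigh, tmp64);    /* write the pair back */
    tcg_temp_free_i64(tmp64);
#endif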
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    tcg_gen_addi_i32(addr, addr, 4);
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_high, tmp);
    store_reg(s, rt2, tmp);
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
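/* For reference, the guest-visible pattern these helpers implement is the
   usual load-exclusive/store-exclusive retry loop, e.g.:
       1: ldrex r1, [r0]
          add   r1, r1, #1
          strex r2, r1, [r0]
          cmp   r2, #0
          bne   1b
   gen_load_exclusive() records the address and value, gen_store_exclusive()
   below succeeds only if they still match, and gen_clrex()/CLREX drops the
   monitor by setting cpu_exclusive_addr to -1. */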
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_STREX);
    s->is_jmp = DISAS_JUMP;
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    TCGv tmp2 = new_tmp();
    tcg_gen_addi_i32(tmp2, addr, 4);
    /* Load the second word from addr + 4 and check it against the saved
       high half. */
    tmp = gen_ld32(tmp2, IS_USER(s));
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
    tmp = load_reg(s, rt);
    gen_st8(tmp, addr, IS_USER(s));
    gen_st16(tmp, addr, IS_USER(s));
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_addi_i32(addr, addr, 4);
    tmp = load_reg(s, rt2);
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
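/* Note the two gen_store_exclusive() implementations above: with
   CONFIG_USER_ONLY the compare-and-store is not emitted inline; the operands
   are stashed in cpu_exclusive_test/cpu_exclusive_info and EXCP_STREX is
   raised so the operation can be completed outside generated code.  The
   system-mode version emits the address and value checks and the conditional
   store directly, relying on only one CPU running at a time. */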
static void disas_arm_insn(CPUState *env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;

    insn = ldl_code(s->pc);

    /* M variants do not implement ARM mode. */
    /* Unconditional instructions. */
    if (((insn >> 25) & 7) == 1) {
        /* NEON Data processing. */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_data_insn(env, s, insn))
    if ((insn & 0x0f100000) == 0x04000000) {
        /* NEON load/store. */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_ls_insn(env, s, insn))
6059 if ((insn
& 0x0d70f000) == 0x0550f000)
6061 else if ((insn
& 0x0ffffdff) == 0x01010000) {
6064 if (insn
& (1 << 9)) {
6065 /* BE8 mode not implemented. */
6069 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6070 switch ((insn
>> 4) & 0xf) {
6079 /* We don't emulate caches so these are a no-op. */
6084 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6090 op1
= (insn
& 0x1f);
6091 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
6092 addr
= load_reg(s
, 13);
6095 tmp
= tcg_const_i32(op1
);
6096 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6097 tcg_temp_free_i32(tmp
);
6099 i
= (insn
>> 23) & 3;
6101 case 0: offset
= -4; break; /* DA */
6102 case 1: offset
= 0; break; /* IA */
6103 case 2: offset
= -8; break; /* DB */
6104 case 3: offset
= 4; break; /* IB */
6108 tcg_gen_addi_i32(addr
, addr
, offset
);
6109 tmp
= load_reg(s
, 14);
6110 gen_st32(tmp
, addr
, 0);
6111 tmp
= load_cpu_field(spsr
);
6112 tcg_gen_addi_i32(addr
, addr
, 4);
6113 gen_st32(tmp
, addr
, 0);
6114 if (insn
& (1 << 21)) {
6115 /* Base writeback. */
6117 case 0: offset
= -8; break;
6118 case 1: offset
= 4; break;
6119 case 2: offset
= -4; break;
6120 case 3: offset
= 0; break;
6124 tcg_gen_addi_i32(addr
, addr
, offset
);
6125 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
6126 store_reg(s
, 13, addr
);
6128 tmp
= tcg_const_i32(op1
);
6129 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6130 tcg_temp_free_i32(tmp
);
6137 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6143 rn
= (insn
>> 16) & 0xf;
6144 addr
= load_reg(s
, rn
);
6145 i
= (insn
>> 23) & 3;
6147 case 0: offset
= -4; break; /* DA */
6148 case 1: offset
= 0; break; /* IA */
6149 case 2: offset
= -8; break; /* DB */
6150 case 3: offset
= 4; break; /* IB */
6154 tcg_gen_addi_i32(addr
, addr
, offset
);
6155 /* Load PC into tmp and CPSR into tmp2. */
6156 tmp
= gen_ld32(addr
, 0);
6157 tcg_gen_addi_i32(addr
, addr
, 4);
6158 tmp2
= gen_ld32(addr
, 0);
6159 if (insn
& (1 << 21)) {
6160 /* Base writeback. */
6162 case 0: offset
= -8; break;
6163 case 1: offset
= 4; break;
6164 case 2: offset
= -4; break;
6165 case 3: offset
= 0; break;
6169 tcg_gen_addi_i32(addr
, addr
, offset
);
6170 store_reg(s
, rn
, addr
);
6174 gen_rfe(s
, tmp
, tmp2
);
6176 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6177 /* branch link and change to thumb (blx <offset>) */
6180 val
= (uint32_t)s
->pc
;
6182 tcg_gen_movi_i32(tmp
, val
);
6183 store_reg(s
, 14, tmp
);
6184 /* Sign-extend the 24-bit offset */
6185 offset
= (((int32_t)insn
) << 8) >> 8;
6186 /* offset * 4 + bit24 * 2 + (thumb bit) */
6187 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6188 /* pipeline offset */
6192 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6193 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6194 /* iWMMXt register transfer. */
6195 if (env
->cp15
.c15_cpar
& (1 << 1))
6196 if (!disas_iwmmxt_insn(env
, s
, insn
))
6199 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6200 /* Coprocessor double register transfer. */
6201 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6202 /* Additional coprocessor register transfer. */
6203 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6206 /* cps (privileged) */
6210 if (insn
& (1 << 19)) {
6211 if (insn
& (1 << 8))
6213 if (insn
& (1 << 7))
6215 if (insn
& (1 << 6))
6217 if (insn
& (1 << 18))
6220 if (insn
& (1 << 17)) {
6222 val
|= (insn
& 0x1f);
6225 gen_set_psr_im(s
, mask
, 0, val
);
6232 /* if not always execute, we generate a conditional jump to
6234 s
->condlabel
= gen_new_label();
6235 gen_test_cc(cond
^ 1, s
->condlabel
);
6238 if ((insn
& 0x0f900000) == 0x03000000) {
6239 if ((insn
& (1 << 21)) == 0) {
6241 rd
= (insn
>> 12) & 0xf;
6242 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6243 if ((insn
& (1 << 22)) == 0) {
6246 tcg_gen_movi_i32(tmp
, val
);
6249 tmp
= load_reg(s
, rd
);
6250 tcg_gen_ext16u_i32(tmp
, tmp
);
6251 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6253 store_reg(s
, rd
, tmp
);
6255 if (((insn
>> 12) & 0xf) != 0xf)
6257 if (((insn
>> 16) & 0xf) == 0) {
6258 gen_nop_hint(s
, insn
& 0xff);
6260 /* CPSR = immediate */
6262 shift
= ((insn
>> 8) & 0xf) * 2;
6264 val
= (val
>> shift
) | (val
<< (32 - shift
));
6265 i
= ((insn
& (1 << 22)) != 0);
6266 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6270 } else if ((insn
& 0x0f900000) == 0x01000000
6271 && (insn
& 0x00000090) != 0x00000090) {
6272 /* miscellaneous instructions */
6273 op1
= (insn
>> 21) & 3;
6274 sh
= (insn
>> 4) & 0xf;
6277 case 0x0: /* move program status register */
6280 tmp
= load_reg(s
, rm
);
6281 i
= ((op1
& 2) != 0);
6282 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6286 rd
= (insn
>> 12) & 0xf;
6290 tmp
= load_cpu_field(spsr
);
6293 gen_helper_cpsr_read(tmp
);
6295 store_reg(s
, rd
, tmp
);
6300 /* branch/exchange thumb (bx). */
6301 tmp
= load_reg(s
, rm
);
6303 } else if (op1
== 3) {
6305 rd
= (insn
>> 12) & 0xf;
6306 tmp
= load_reg(s
, rm
);
6307 gen_helper_clz(tmp
, tmp
);
6308 store_reg(s
, rd
, tmp
);
6316 /* Trivial implementation equivalent to bx. */
6317 tmp
= load_reg(s
, rm
);
6327 /* branch link/exchange thumb (blx) */
6328 tmp
= load_reg(s
, rm
);
6330 tcg_gen_movi_i32(tmp2
, s
->pc
);
6331 store_reg(s
, 14, tmp2
);
6334 case 0x5: /* saturating add/subtract */
6335 rd
= (insn
>> 12) & 0xf;
6336 rn
= (insn
>> 16) & 0xf;
6337 tmp
= load_reg(s
, rm
);
6338 tmp2
= load_reg(s
, rn
);
6340 gen_helper_double_saturate(tmp2
, tmp2
);
6342 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6344 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6346 store_reg(s
, rd
, tmp
);
6349 gen_set_condexec(s
);
6350 gen_set_pc_im(s
->pc
- 4);
6351 gen_exception(EXCP_BKPT
);
6352 s
->is_jmp
= DISAS_JUMP
;
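            /* Exception-raising pattern used throughout this decoder (also
               for EXCP_UDEF and EXCP_STREX): write the conditional-execution
               state and the faulting instruction's PC (s->pc - 4, since the
               PC has already been advanced) back to the CPU state, generate
               the exception, and end the TB with DISAS_JUMP. */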
6354 case 0x8: /* signed multiply */
6358 rs
= (insn
>> 8) & 0xf;
6359 rn
= (insn
>> 12) & 0xf;
6360 rd
= (insn
>> 16) & 0xf;
6362 /* (32 * 16) >> 16 */
6363 tmp
= load_reg(s
, rm
);
6364 tmp2
= load_reg(s
, rs
);
6366 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6369 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6370 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6372 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6373 tcg_temp_free_i64(tmp64
);
6374 if ((sh
& 2) == 0) {
6375 tmp2
= load_reg(s
, rn
);
6376 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6379 store_reg(s
, rd
, tmp
);
6382 tmp
= load_reg(s
, rm
);
6383 tmp2
= load_reg(s
, rs
);
6384 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6387 tmp64
= tcg_temp_new_i64();
6388 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6390 gen_addq(s
, tmp64
, rn
, rd
);
6391 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6392 tcg_temp_free_i64(tmp64
);
6395 tmp2
= load_reg(s
, rn
);
6396 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6399 store_reg(s
, rd
, tmp
);
6406 } else if (((insn
& 0x0e000000) == 0 &&
6407 (insn
& 0x00000090) != 0x90) ||
6408 ((insn
& 0x0e000000) == (1 << 25))) {
6409 int set_cc
, logic_cc
, shiftop
;
6411 op1
= (insn
>> 21) & 0xf;
6412 set_cc
= (insn
>> 20) & 1;
6413 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6415 /* data processing instruction */
6416 if (insn
& (1 << 25)) {
6417 /* immediate operand */
6419 shift
= ((insn
>> 8) & 0xf) * 2;
6421 val
= (val
>> shift
) | (val
<< (32 - shift
));
6424 tcg_gen_movi_i32(tmp2
, val
);
6425 if (logic_cc
&& shift
) {
6426 gen_set_CF_bit31(tmp2
);
6431 tmp2
= load_reg(s
, rm
);
6432 shiftop
= (insn
>> 5) & 3;
6433 if (!(insn
& (1 << 4))) {
6434 shift
= (insn
>> 7) & 0x1f;
6435 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6437 rs
= (insn
>> 8) & 0xf;
6438 tmp
= load_reg(s
, rs
);
6439 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6442 if (op1
!= 0x0f && op1
!= 0x0d) {
6443 rn
= (insn
>> 16) & 0xf;
6444 tmp
= load_reg(s
, rn
);
6448 rd
= (insn
>> 12) & 0xf;
6451 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6455 store_reg_bx(env
, s
, rd
, tmp
);
6458 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6462 store_reg_bx(env
, s
, rd
, tmp
);
6465 if (set_cc
&& rd
== 15) {
6466 /* SUBS r15, ... is used for exception return. */
6470 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6471 gen_exception_return(s
, tmp
);
6474 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6476 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6478 store_reg_bx(env
, s
, rd
, tmp
);
6483 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6485 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6487 store_reg_bx(env
, s
, rd
, tmp
);
6491 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6493 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6495 store_reg_bx(env
, s
, rd
, tmp
);
6499 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6501 gen_add_carry(tmp
, tmp
, tmp2
);
6503 store_reg_bx(env
, s
, rd
, tmp
);
6507 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6509 gen_sub_carry(tmp
, tmp
, tmp2
);
6511 store_reg_bx(env
, s
, rd
, tmp
);
6515 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6517 gen_sub_carry(tmp
, tmp2
, tmp
);
6519 store_reg_bx(env
, s
, rd
, tmp
);
6523 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6530 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6537 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6543 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6548 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6552 store_reg_bx(env
, s
, rd
, tmp
);
6555 if (logic_cc
&& rd
== 15) {
6556 /* MOVS r15, ... is used for exception return. */
6560 gen_exception_return(s
, tmp2
);
6565 store_reg_bx(env
, s
, rd
, tmp2
);
6569 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6573 store_reg_bx(env
, s
, rd
, tmp
);
6577 tcg_gen_not_i32(tmp2
, tmp2
);
6581 store_reg_bx(env
, s
, rd
, tmp2
);
6584 if (op1
!= 0x0f && op1
!= 0x0d) {
6588 /* other instructions */
6589 op1
= (insn
>> 24) & 0xf;
6593 /* multiplies, extra load/stores */
6594 sh
= (insn
>> 5) & 3;
6597 rd
= (insn
>> 16) & 0xf;
6598 rn
= (insn
>> 12) & 0xf;
6599 rs
= (insn
>> 8) & 0xf;
6601 op1
= (insn
>> 20) & 0xf;
6603 case 0: case 1: case 2: case 3: case 6:
6605 tmp
= load_reg(s
, rs
);
6606 tmp2
= load_reg(s
, rm
);
6607 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6609 if (insn
& (1 << 22)) {
6610 /* Subtract (mls) */
6612 tmp2
= load_reg(s
, rn
);
6613 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6615 } else if (insn
& (1 << 21)) {
6617 tmp2
= load_reg(s
, rn
);
6618 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6621 if (insn
& (1 << 20))
6623 store_reg(s
, rd
, tmp
);
6627 tmp
= load_reg(s
, rs
);
6628 tmp2
= load_reg(s
, rm
);
6629 if (insn
& (1 << 22))
6630 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6632 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6633 if (insn
& (1 << 21)) /* mult accumulate */
6634 gen_addq(s
, tmp64
, rn
, rd
);
6635 if (!(insn
& (1 << 23))) { /* double accumulate */
6637 gen_addq_lo(s
, tmp64
, rn
);
6638 gen_addq_lo(s
, tmp64
, rd
);
6640 if (insn
& (1 << 20))
6641 gen_logicq_cc(tmp64
);
6642 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6643 tcg_temp_free_i64(tmp64
);
6647 rn
= (insn
>> 16) & 0xf;
6648 rd
= (insn
>> 12) & 0xf;
6649 if (insn
& (1 << 23)) {
6650 /* load/store exclusive */
6651 op1
= (insn
>> 21) & 0x3;
6656 addr
= tcg_temp_local_new_i32();
6657 load_reg_var(s
, addr
, rn
);
6658 if (insn
& (1 << 20)) {
6661 gen_load_exclusive(s
, rd
, 15, addr
, 2);
6663 case 1: /* ldrexd */
6664 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
6666 case 2: /* ldrexb */
6667 gen_load_exclusive(s
, rd
, 15, addr
, 0);
6669 case 3: /* ldrexh */
6670 gen_load_exclusive(s
, rd
, 15, addr
, 1);
6679 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
6681 case 1: /* strexd */
6682 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
6684 case 2: /* strexb */
6685 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
6687 case 3: /* strexh */
6688 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
6694 tcg_temp_free(addr
);
6696 /* SWP instruction */
6699 /* ??? This is not really atomic. However we know
6700 we never have multiple CPUs running in parallel,
6701 so it is good enough. */
6702 addr
= load_reg(s
, rn
);
6703 tmp
= load_reg(s
, rm
);
6704 if (insn
& (1 << 22)) {
6705 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6706 gen_st8(tmp
, addr
, IS_USER(s
));
6708 tmp2
= gen_ld32(addr
, IS_USER(s
));
6709 gen_st32(tmp
, addr
, IS_USER(s
));
6712 store_reg(s
, rd
, tmp2
);
6718 /* Misc load/store */
6719 rn
= (insn
>> 16) & 0xf;
6720 rd
= (insn
>> 12) & 0xf;
6721 addr
= load_reg(s
, rn
);
6722 if (insn
& (1 << 24))
6723 gen_add_datah_offset(s
, insn
, 0, addr
);
6725 if (insn
& (1 << 20)) {
6729 tmp
= gen_ld16u(addr
, IS_USER(s
));
6732 tmp
= gen_ld8s(addr
, IS_USER(s
));
6736 tmp
= gen_ld16s(addr
, IS_USER(s
));
6740 } else if (sh
& 2) {
6744 tmp
= load_reg(s
, rd
);
6745 gen_st32(tmp
, addr
, IS_USER(s
));
6746 tcg_gen_addi_i32(addr
, addr
, 4);
6747 tmp
= load_reg(s
, rd
+ 1);
6748 gen_st32(tmp
, addr
, IS_USER(s
));
6752 tmp
= gen_ld32(addr
, IS_USER(s
));
6753 store_reg(s
, rd
, tmp
);
6754 tcg_gen_addi_i32(addr
, addr
, 4);
6755 tmp
= gen_ld32(addr
, IS_USER(s
));
6759 address_offset
= -4;
6762 tmp
= load_reg(s
, rd
);
6763 gen_st16(tmp
, addr
, IS_USER(s
));
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap. */
6770 if (!(insn
& (1 << 24))) {
6771 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6772 store_reg(s
, rn
, addr
);
6773 } else if (insn
& (1 << 21)) {
6775 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6776 store_reg(s
, rn
, addr
);
6781 /* Complete the load. */
6782 store_reg(s
, rd
, tmp
);
6791 if (insn
& (1 << 4)) {
6793 /* Armv6 Media instructions. */
6795 rn
= (insn
>> 16) & 0xf;
6796 rd
= (insn
>> 12) & 0xf;
6797 rs
= (insn
>> 8) & 0xf;
6798 switch ((insn
>> 23) & 3) {
6799 case 0: /* Parallel add/subtract. */
6800 op1
= (insn
>> 20) & 7;
6801 tmp
= load_reg(s
, rn
);
6802 tmp2
= load_reg(s
, rm
);
6803 sh
= (insn
>> 5) & 7;
6804 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6806 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6808 store_reg(s
, rd
, tmp
);
6811 if ((insn
& 0x00700020) == 0) {
6812 /* Halfword pack. */
6813 tmp
= load_reg(s
, rn
);
6814 tmp2
= load_reg(s
, rm
);
6815 shift
= (insn
>> 7) & 0x1f;
6816 if (insn
& (1 << 6)) {
6820 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6821 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6822 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6826 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6827 tcg_gen_ext16u_i32(tmp
, tmp
);
6828 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6830 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6832 store_reg(s
, rd
, tmp
);
6833 } else if ((insn
& 0x00200020) == 0x00200000) {
6835 tmp
= load_reg(s
, rm
);
6836 shift
= (insn
>> 7) & 0x1f;
6837 if (insn
& (1 << 6)) {
6840 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6842 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6844 sh
= (insn
>> 16) & 0x1f;
6846 tmp2
= tcg_const_i32(sh
);
6847 if (insn
& (1 << 22))
6848 gen_helper_usat(tmp
, tmp
, tmp2
);
6850 gen_helper_ssat(tmp
, tmp
, tmp2
);
6851 tcg_temp_free_i32(tmp2
);
6853 store_reg(s
, rd
, tmp
);
6854 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6856 tmp
= load_reg(s
, rm
);
6857 sh
= (insn
>> 16) & 0x1f;
6859 tmp2
= tcg_const_i32(sh
);
6860 if (insn
& (1 << 22))
6861 gen_helper_usat16(tmp
, tmp
, tmp2
);
6863 gen_helper_ssat16(tmp
, tmp
, tmp2
);
6864 tcg_temp_free_i32(tmp2
);
6866 store_reg(s
, rd
, tmp
);
6867 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6869 tmp
= load_reg(s
, rn
);
6870 tmp2
= load_reg(s
, rm
);
6872 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6873 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6876 store_reg(s
, rd
, tmp
);
6877 } else if ((insn
& 0x000003e0) == 0x00000060) {
6878 tmp
= load_reg(s
, rm
);
6879 shift
= (insn
>> 10) & 3;
                /* ??? In many cases it's not necessary to do a
                   rotate, a shift is sufficient. */
6883 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
6884 op1
= (insn
>> 20) & 7;
6886 case 0: gen_sxtb16(tmp
); break;
6887 case 2: gen_sxtb(tmp
); break;
6888 case 3: gen_sxth(tmp
); break;
6889 case 4: gen_uxtb16(tmp
); break;
6890 case 6: gen_uxtb(tmp
); break;
6891 case 7: gen_uxth(tmp
); break;
6892 default: goto illegal_op
;
6895 tmp2
= load_reg(s
, rn
);
6896 if ((op1
& 3) == 0) {
6897 gen_add16(tmp
, tmp2
);
6899 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6903 store_reg(s
, rd
, tmp
);
6904 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
6906 tmp
= load_reg(s
, rm
);
6907 if (insn
& (1 << 22)) {
6908 if (insn
& (1 << 7)) {
6912 gen_helper_rbit(tmp
, tmp
);
6915 if (insn
& (1 << 7))
6918 tcg_gen_bswap32_i32(tmp
, tmp
);
6920 store_reg(s
, rd
, tmp
);
6925 case 2: /* Multiplies (Type 3). */
6926 tmp
= load_reg(s
, rm
);
6927 tmp2
= load_reg(s
, rs
);
6928 if (insn
& (1 << 20)) {
6929 /* Signed multiply most significant [accumulate]. */
6930 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6931 if (insn
& (1 << 5))
6932 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
6933 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6935 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6936 tcg_temp_free_i64(tmp64
);
6938 tmp2
= load_reg(s
, rd
);
6939 if (insn
& (1 << 6)) {
6940 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6942 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6946 store_reg(s
, rn
, tmp
);
6948 if (insn
& (1 << 5))
6949 gen_swap_half(tmp2
);
6950 gen_smul_dual(tmp
, tmp2
);
6951 /* This addition cannot overflow. */
6952 if (insn
& (1 << 6)) {
6953 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6955 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6958 if (insn
& (1 << 22)) {
6959 /* smlald, smlsld */
6960 tmp64
= tcg_temp_new_i64();
6961 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6963 gen_addq(s
, tmp64
, rd
, rn
);
6964 gen_storeq_reg(s
, rd
, rn
, tmp64
);
6965 tcg_temp_free_i64(tmp64
);
6967 /* smuad, smusd, smlad, smlsd */
6970 tmp2
= load_reg(s
, rd
);
6971 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6974 store_reg(s
, rn
, tmp
);
6979 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
6981 case 0: /* Unsigned sum of absolute differences. */
6983 tmp
= load_reg(s
, rm
);
6984 tmp2
= load_reg(s
, rs
);
6985 gen_helper_usad8(tmp
, tmp
, tmp2
);
6988 tmp2
= load_reg(s
, rd
);
6989 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6992 store_reg(s
, rn
, tmp
);
6994 case 0x20: case 0x24: case 0x28: case 0x2c:
6995 /* Bitfield insert/clear. */
6997 shift
= (insn
>> 7) & 0x1f;
6998 i
= (insn
>> 16) & 0x1f;
7002 tcg_gen_movi_i32(tmp
, 0);
7004 tmp
= load_reg(s
, rm
);
7007 tmp2
= load_reg(s
, rd
);
7008 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7011 store_reg(s
, rd
, tmp
);
7013 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7014 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7016 tmp
= load_reg(s
, rm
);
7017 shift
= (insn
>> 7) & 0x1f;
7018 i
= ((insn
>> 16) & 0x1f) + 1;
7023 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7025 gen_sbfx(tmp
, shift
, i
);
7028 store_reg(s
, rd
, tmp
);
7038 /* Check for undefined extension instructions
7039 * per the ARM Bible IE:
7040 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7042 sh
= (0xf << 20) | (0xf << 4);
7043 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7047 /* load/store byte/word */
7048 rn
= (insn
>> 16) & 0xf;
7049 rd
= (insn
>> 12) & 0xf;
7050 tmp2
= load_reg(s
, rn
);
7051 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7052 if (insn
& (1 << 24))
7053 gen_add_data_offset(s
, insn
, tmp2
);
7054 if (insn
& (1 << 20)) {
7056 if (insn
& (1 << 22)) {
7057 tmp
= gen_ld8u(tmp2
, i
);
7059 tmp
= gen_ld32(tmp2
, i
);
7063 tmp
= load_reg(s
, rd
);
7064 if (insn
& (1 << 22))
7065 gen_st8(tmp
, tmp2
, i
);
7067 gen_st32(tmp
, tmp2
, i
);
7069 if (!(insn
& (1 << 24))) {
7070 gen_add_data_offset(s
, insn
, tmp2
);
7071 store_reg(s
, rn
, tmp2
);
7072 } else if (insn
& (1 << 21)) {
7073 store_reg(s
, rn
, tmp2
);
7077 if (insn
& (1 << 20)) {
7078 /* Complete the load. */
7082 store_reg(s
, rd
, tmp
);
7088 int j
, n
, user
, loaded_base
;
7090 /* load/store multiple words */
7091 /* XXX: store correct base if write back */
7093 if (insn
& (1 << 22)) {
7095 goto illegal_op
; /* only usable in supervisor mode */
7097 if ((insn
& (1 << 15)) == 0)
7100 rn
= (insn
>> 16) & 0xf;
7101 addr
= load_reg(s
, rn
);
7103 /* compute total size */
7105 TCGV_UNUSED(loaded_var
);
7108 if (insn
& (1 << i
))
7111 /* XXX: test invalid n == 0 case ? */
7112 if (insn
& (1 << 23)) {
7113 if (insn
& (1 << 24)) {
7115 tcg_gen_addi_i32(addr
, addr
, 4);
7117 /* post increment */
7120 if (insn
& (1 << 24)) {
7122 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7124 /* post decrement */
7126 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7131 if (insn
& (1 << i
)) {
7132 if (insn
& (1 << 20)) {
7134 tmp
= gen_ld32(addr
, IS_USER(s
));
7138 tmp2
= tcg_const_i32(i
);
7139 gen_helper_set_user_reg(tmp2
, tmp
);
7140 tcg_temp_free_i32(tmp2
);
7142 } else if (i
== rn
) {
7146 store_reg(s
, i
, tmp
);
7151 /* special case: r15 = PC + 8 */
7152 val
= (long)s
->pc
+ 4;
7154 tcg_gen_movi_i32(tmp
, val
);
7157 tmp2
= tcg_const_i32(i
);
7158 gen_helper_get_user_reg(tmp
, tmp2
);
7159 tcg_temp_free_i32(tmp2
);
7161 tmp
= load_reg(s
, i
);
7163 gen_st32(tmp
, addr
, IS_USER(s
));
7166 /* no need to add after the last transfer */
7168 tcg_gen_addi_i32(addr
, addr
, 4);
7171 if (insn
& (1 << 21)) {
7173 if (insn
& (1 << 23)) {
7174 if (insn
& (1 << 24)) {
7177 /* post increment */
7178 tcg_gen_addi_i32(addr
, addr
, 4);
7181 if (insn
& (1 << 24)) {
7184 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7186 /* post decrement */
7187 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7190 store_reg(s
, rn
, addr
);
7195 store_reg(s
, rn
, loaded_var
);
7197 if ((insn
& (1 << 22)) && !user
) {
7198 /* Restore CPSR from SPSR. */
7199 tmp
= load_cpu_field(spsr
);
7200 gen_set_cpsr(tmp
, 0xffffffff);
7202 s
->is_jmp
= DISAS_UPDATE
;
7211 /* branch (and link) */
7212 val
= (int32_t)s
->pc
;
7213 if (insn
& (1 << 24)) {
7215 tcg_gen_movi_i32(tmp
, val
);
7216 store_reg(s
, 14, tmp
);
7218 offset
= (((int32_t)insn
<< 8) >> 8);
7219 val
+= (offset
<< 2) + 4;
7227 if (disas_coproc_insn(env
, s
, insn
))
7232 gen_set_pc_im(s
->pc
);
7233 s
->is_jmp
= DISAS_SWI
;
7237 gen_set_condexec(s
);
7238 gen_set_pc_im(s
->pc
- 4);
7239 gen_exception(EXCP_UDEF
);
7240 s
->is_jmp
= DISAS_JUMP
;
/* Return true if this is a Thumb-2 logical op. */
thumb2_logic_op(int op)

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{
    tcg_gen_and_i32(t0, t0, t1);
    tcg_gen_andc_i32(t0, t0, t1);
    tcg_gen_or_i32(t0, t0, t1);
    tcg_gen_not_i32(t1, t1);
    tcg_gen_or_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, t1);
    gen_helper_add_cc(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, t1);
    gen_helper_adc_cc(t0, t0, t1);
    gen_helper_sbc_cc(t0, t0, t1);
    gen_sub_carry(t0, t0, t1);
    gen_helper_sub_cc(t0, t0, t1);
    tcg_gen_sub_i32(t0, t0, t1);
    gen_helper_sub_cc(t0, t1, t0);
    tcg_gen_sub_i32(t0, t1, t0);
    default: /* 5, 6, 7, 9, 12, 15. */
    gen_set_CF_bit31(t1);
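/* Sketch (not compiled) of how gen_thumb2_data_op() is driven by the
   register/constant-shift decode path below: the second operand is shifted
   first, then the data op runs and, for valid opcodes, the result is written
   back.  Names follow the surrounding decoder. */
#if 0
    tmp = load_reg(s, rn);
    tmp2 = load_reg(s, rm);
    conds = (insn & (1 << 20)) != 0;
    logic_cc = (conds && thumb2_logic_op(op));
    gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
    if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
        goto illegal_op;
    store_reg(s, rd, tmp);
#endif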
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal. */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature(env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior. */
7349 if ((insn
& (1 << 12)) == 0) {
7350 /* Second half of blx. */
7351 offset
= ((insn
& 0x7ff) << 1);
7352 tmp
= load_reg(s
, 14);
7353 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7354 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7357 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7358 store_reg(s
, 14, tmp2
);
7362 if (insn
& (1 << 11)) {
7363 /* Second half of bl. */
7364 offset
= ((insn
& 0x7ff) << 1) | 1;
7365 tmp
= load_reg(s
, 14);
7366 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7369 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7370 store_reg(s
, 14, tmp2
);
7374 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
        /* Instruction spans a page boundary.  Implement it as two
           16-bit instructions in case the second half causes an
           exception. */
7378 offset
= ((int32_t)insn
<< 21) >> 9;
7379 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7382 /* Fall through to 32-bit decode. */
7385 insn
= lduw_code(s
->pc
);
7387 insn
|= (uint32_t)insn_hw1
<< 16;
7389 if ((insn
& 0xf800e800) != 0xf000e800) {
7393 rn
= (insn
>> 16) & 0xf;
7394 rs
= (insn
>> 12) & 0xf;
7395 rd
= (insn
>> 8) & 0xf;
7397 switch ((insn
>> 25) & 0xf) {
7398 case 0: case 1: case 2: case 3:
7399 /* 16-bit instructions. Should never happen. */
7402 if (insn
& (1 << 22)) {
7403 /* Other load/store, table branch. */
7404 if (insn
& 0x01200000) {
7405 /* Load/store doubleword. */
7408 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7410 addr
= load_reg(s
, rn
);
7412 offset
= (insn
& 0xff) * 4;
7413 if ((insn
& (1 << 23)) == 0)
7415 if (insn
& (1 << 24)) {
7416 tcg_gen_addi_i32(addr
, addr
, offset
);
7419 if (insn
& (1 << 20)) {
7421 tmp
= gen_ld32(addr
, IS_USER(s
));
7422 store_reg(s
, rs
, tmp
);
7423 tcg_gen_addi_i32(addr
, addr
, 4);
7424 tmp
= gen_ld32(addr
, IS_USER(s
));
7425 store_reg(s
, rd
, tmp
);
7428 tmp
= load_reg(s
, rs
);
7429 gen_st32(tmp
, addr
, IS_USER(s
));
7430 tcg_gen_addi_i32(addr
, addr
, 4);
7431 tmp
= load_reg(s
, rd
);
7432 gen_st32(tmp
, addr
, IS_USER(s
));
7434 if (insn
& (1 << 21)) {
7435 /* Base writeback. */
7438 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7439 store_reg(s
, rn
, addr
);
7443 } else if ((insn
& (1 << 23)) == 0) {
7444 /* Load/store exclusive word. */
7445 addr
= tcg_temp_local_new();
7446 load_reg_var(s
, addr
, rn
);
7447 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7448 if (insn
& (1 << 20)) {
7449 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7451 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7453 tcg_temp_free(addr
);
7454 } else if ((insn
& (1 << 6)) == 0) {
7458 tcg_gen_movi_i32(addr
, s
->pc
);
7460 addr
= load_reg(s
, rn
);
7462 tmp
= load_reg(s
, rm
);
7463 tcg_gen_add_i32(addr
, addr
, tmp
);
7464 if (insn
& (1 << 4)) {
7466 tcg_gen_add_i32(addr
, addr
, tmp
);
7468 tmp
= gen_ld16u(addr
, IS_USER(s
));
7471 tmp
= gen_ld8u(addr
, IS_USER(s
));
7474 tcg_gen_shli_i32(tmp
, tmp
, 1);
7475 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7476 store_reg(s
, 15, tmp
);
7478 /* Load/store exclusive byte/halfword/doubleword. */
7480 op
= (insn
>> 4) & 0x3;
7484 addr
= tcg_temp_local_new();
7485 load_reg_var(s
, addr
, rn
);
7486 if (insn
& (1 << 20)) {
7487 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7489 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7491 tcg_temp_free(addr
);
7494 /* Load/store multiple, RFE, SRS. */
7495 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7496 /* Not available in user mode. */
7499 if (insn
& (1 << 20)) {
7501 addr
= load_reg(s
, rn
);
7502 if ((insn
& (1 << 24)) == 0)
7503 tcg_gen_addi_i32(addr
, addr
, -8);
7504 /* Load PC into tmp and CPSR into tmp2. */
7505 tmp
= gen_ld32(addr
, 0);
7506 tcg_gen_addi_i32(addr
, addr
, 4);
7507 tmp2
= gen_ld32(addr
, 0);
7508 if (insn
& (1 << 21)) {
7509 /* Base writeback. */
7510 if (insn
& (1 << 24)) {
7511 tcg_gen_addi_i32(addr
, addr
, 4);
7513 tcg_gen_addi_i32(addr
, addr
, -4);
7515 store_reg(s
, rn
, addr
);
7519 gen_rfe(s
, tmp
, tmp2
);
7523 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7524 addr
= load_reg(s
, 13);
7527 tmp
= tcg_const_i32(op
);
7528 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7529 tcg_temp_free_i32(tmp
);
7531 if ((insn
& (1 << 24)) == 0) {
7532 tcg_gen_addi_i32(addr
, addr
, -8);
7534 tmp
= load_reg(s
, 14);
7535 gen_st32(tmp
, addr
, 0);
7536 tcg_gen_addi_i32(addr
, addr
, 4);
7538 gen_helper_cpsr_read(tmp
);
7539 gen_st32(tmp
, addr
, 0);
7540 if (insn
& (1 << 21)) {
7541 if ((insn
& (1 << 24)) == 0) {
7542 tcg_gen_addi_i32(addr
, addr
, -4);
7544 tcg_gen_addi_i32(addr
, addr
, 4);
7546 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7547 store_reg(s
, 13, addr
);
7549 tmp
= tcg_const_i32(op
);
7550 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7551 tcg_temp_free_i32(tmp
);
7559 /* Load/store multiple. */
7560 addr
= load_reg(s
, rn
);
7562 for (i
= 0; i
< 16; i
++) {
7563 if (insn
& (1 << i
))
7566 if (insn
& (1 << 24)) {
7567 tcg_gen_addi_i32(addr
, addr
, -offset
);
7570 for (i
= 0; i
< 16; i
++) {
7571 if ((insn
& (1 << i
)) == 0)
7573 if (insn
& (1 << 20)) {
7575 tmp
= gen_ld32(addr
, IS_USER(s
));
7579 store_reg(s
, i
, tmp
);
7583 tmp
= load_reg(s
, i
);
7584 gen_st32(tmp
, addr
, IS_USER(s
));
7586 tcg_gen_addi_i32(addr
, addr
, 4);
7588 if (insn
& (1 << 21)) {
7589 /* Base register writeback. */
7590 if (insn
& (1 << 24)) {
7591 tcg_gen_addi_i32(addr
, addr
, -offset
);
7593 /* Fault if writeback register is in register list. */
7594 if (insn
& (1 << rn
))
7596 store_reg(s
, rn
, addr
);
7603 case 5: /* Data processing register constant shift. */
7606 tcg_gen_movi_i32(tmp
, 0);
7608 tmp
= load_reg(s
, rn
);
7610 tmp2
= load_reg(s
, rm
);
7611 op
= (insn
>> 21) & 0xf;
7612 shiftop
= (insn
>> 4) & 3;
7613 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7614 conds
= (insn
& (1 << 20)) != 0;
7615 logic_cc
= (conds
&& thumb2_logic_op(op
));
7616 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7617 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
7621 store_reg(s
, rd
, tmp
);
7626 case 13: /* Misc data processing. */
7627 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7628 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7631 case 0: /* Register controlled shift. */
7632 tmp
= load_reg(s
, rn
);
7633 tmp2
= load_reg(s
, rm
);
7634 if ((insn
& 0x70) != 0)
7636 op
= (insn
>> 21) & 3;
7637 logic_cc
= (insn
& (1 << 20)) != 0;
7638 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7641 store_reg_bx(env
, s
, rd
, tmp
);
7643 case 1: /* Sign/zero extend. */
7644 tmp
= load_reg(s
, rm
);
7645 shift
= (insn
>> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient. */
7649 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7650 op
= (insn
>> 20) & 7;
7652 case 0: gen_sxth(tmp
); break;
7653 case 1: gen_uxth(tmp
); break;
7654 case 2: gen_sxtb16(tmp
); break;
7655 case 3: gen_uxtb16(tmp
); break;
7656 case 4: gen_sxtb(tmp
); break;
7657 case 5: gen_uxtb(tmp
); break;
7658 default: goto illegal_op
;
7661 tmp2
= load_reg(s
, rn
);
7662 if ((op
>> 1) == 1) {
7663 gen_add16(tmp
, tmp2
);
7665 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7669 store_reg(s
, rd
, tmp
);
7671 case 2: /* SIMD add/subtract. */
7672 op
= (insn
>> 20) & 7;
7673 shift
= (insn
>> 4) & 7;
7674 if ((op
& 3) == 3 || (shift
& 3) == 3)
7676 tmp
= load_reg(s
, rn
);
7677 tmp2
= load_reg(s
, rm
);
7678 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7680 store_reg(s
, rd
, tmp
);
7682 case 3: /* Other data processing. */
7683 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7685 /* Saturating add/subtract. */
7686 tmp
= load_reg(s
, rn
);
7687 tmp2
= load_reg(s
, rm
);
7689 gen_helper_double_saturate(tmp
, tmp
);
7691 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7693 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7696 tmp
= load_reg(s
, rn
);
7698 case 0x0a: /* rbit */
7699 gen_helper_rbit(tmp
, tmp
);
7701 case 0x08: /* rev */
7702 tcg_gen_bswap32_i32(tmp
, tmp
);
7704 case 0x09: /* rev16 */
7707 case 0x0b: /* revsh */
7710 case 0x10: /* sel */
7711 tmp2
= load_reg(s
, rm
);
7713 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7714 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7718 case 0x18: /* clz */
7719 gen_helper_clz(tmp
, tmp
);
7725 store_reg(s
, rd
, tmp
);
7727 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7728 op
= (insn
>> 4) & 0xf;
7729 tmp
= load_reg(s
, rn
);
7730 tmp2
= load_reg(s
, rm
);
7731 switch ((insn
>> 20) & 7) {
7732 case 0: /* 32 x 32 -> 32 */
7733 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7736 tmp2
= load_reg(s
, rs
);
7738 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7740 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7744 case 1: /* 16 x 16 -> 32 */
7745 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7748 tmp2
= load_reg(s
, rs
);
7749 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7753 case 2: /* Dual multiply add. */
7754 case 4: /* Dual multiply subtract. */
7756 gen_swap_half(tmp2
);
7757 gen_smul_dual(tmp
, tmp2
);
7758 /* This addition cannot overflow. */
7759 if (insn
& (1 << 22)) {
7760 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7762 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7767 tmp2
= load_reg(s
, rs
);
7768 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7772 case 3: /* 32 * 16 -> 32msb */
7774 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7777 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7778 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7780 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7781 tcg_temp_free_i64(tmp64
);
7784 tmp2
= load_reg(s
, rs
);
7785 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7789 case 5: case 6: /* 32 * 32 -> 32msb */
7790 gen_imull(tmp
, tmp2
);
7791 if (insn
& (1 << 5)) {
7792 gen_roundqd(tmp
, tmp2
);
7799 tmp2
= load_reg(s
, rs
);
7800 if (insn
& (1 << 21)) {
7801 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7803 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7808 case 7: /* Unsigned sum of absolute differences. */
7809 gen_helper_usad8(tmp
, tmp
, tmp2
);
7812 tmp2
= load_reg(s
, rs
);
7813 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7818 store_reg(s
, rd
, tmp
);
7820 case 6: case 7: /* 64-bit multiply, Divide. */
7821 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7822 tmp
= load_reg(s
, rn
);
7823 tmp2
= load_reg(s
, rm
);
7824 if ((op
& 0x50) == 0x10) {
7826 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7829 gen_helper_udiv(tmp
, tmp
, tmp2
);
7831 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7833 store_reg(s
, rd
, tmp
);
7834 } else if ((op
& 0xe) == 0xc) {
7835 /* Dual multiply accumulate long. */
7837 gen_swap_half(tmp2
);
7838 gen_smul_dual(tmp
, tmp2
);
7840 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7842 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7846 tmp64
= tcg_temp_new_i64();
7847 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7849 gen_addq(s
, tmp64
, rs
, rd
);
7850 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7851 tcg_temp_free_i64(tmp64
);
7854 /* Unsigned 64-bit multiply */
7855 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7859 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7861 tmp64
= tcg_temp_new_i64();
7862 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7865 /* Signed 64-bit multiply */
7866 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7871 gen_addq_lo(s
, tmp64
, rs
);
7872 gen_addq_lo(s
, tmp64
, rd
);
7873 } else if (op
& 0x40) {
7874 /* 64-bit accumulate. */
7875 gen_addq(s
, tmp64
, rs
, rd
);
7877 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7878 tcg_temp_free_i64(tmp64
);
7883 case 6: case 7: case 14: case 15:
7885 if (((insn
>> 24) & 3) == 3) {
7886 /* Translate into the equivalent ARM encoding. */
7887 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
7888 if (disas_neon_data_insn(env
, s
, insn
))
7891 if (insn
& (1 << 28))
7893 if (disas_coproc_insn (env
, s
, insn
))
7897 case 8: case 9: case 10: case 11:
7898 if (insn
& (1 << 15)) {
7899 /* Branches, misc control. */
7900 if (insn
& 0x5000) {
7901 /* Unconditional branch. */
7902 /* signextend(hw1[10:0]) -> offset[:12]. */
7903 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
7904 /* hw1[10:0] -> offset[11:1]. */
7905 offset
|= (insn
& 0x7ff) << 1;
7906 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7907 offset[24:22] already have the same value because of the
7908 sign extension above. */
7909 offset
^= ((~insn
) & (1 << 13)) << 10;
7910 offset
^= ((~insn
) & (1 << 11)) << 11;
7912 if (insn
& (1 << 14)) {
7913 /* Branch and link. */
7914 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
7918 if (insn
& (1 << 12)) {
7923 offset
&= ~(uint32_t)2;
7924 gen_bx_im(s
, offset
);
7926 } else if (((insn
>> 23) & 7) == 7) {
7928 if (insn
& (1 << 13))
7931 if (insn
& (1 << 26)) {
7932 /* Secure monitor call (v6Z) */
7933 goto illegal_op
; /* not implemented. */
7935 op
= (insn
>> 20) & 7;
7937 case 0: /* msr cpsr. */
7939 tmp
= load_reg(s
, rn
);
7940 addr
= tcg_const_i32(insn
& 0xff);
7941 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
7942 tcg_temp_free_i32(addr
);
7948 case 1: /* msr spsr. */
7951 tmp
= load_reg(s
, rn
);
7953 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
7957 case 2: /* cps, nop-hint. */
7958 if (((insn
>> 8) & 7) == 0) {
7959 gen_nop_hint(s
, insn
& 0xff);
7961 /* Implemented as NOP in user mode. */
7966 if (insn
& (1 << 10)) {
7967 if (insn
& (1 << 7))
7969 if (insn
& (1 << 6))
7971 if (insn
& (1 << 5))
7973 if (insn
& (1 << 9))
7974 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
7976 if (insn
& (1 << 8)) {
7978 imm
|= (insn
& 0x1f);
7981 gen_set_psr_im(s
, offset
, 0, imm
);
7984 case 3: /* Special control operations. */
7986 op
= (insn
>> 4) & 0xf;
7994 /* These execute as NOPs. */
8001 /* Trivial implementation equivalent to bx. */
8002 tmp
= load_reg(s
, rn
);
8005 case 5: /* Exception return. */
8009 if (rn
!= 14 || rd
!= 15) {
8012 tmp
= load_reg(s
, rn
);
8013 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
8014 gen_exception_return(s
, tmp
);
8016 case 6: /* mrs cpsr. */
8019 addr
= tcg_const_i32(insn
& 0xff);
8020 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
8021 tcg_temp_free_i32(addr
);
8023 gen_helper_cpsr_read(tmp
);
8025 store_reg(s
, rd
, tmp
);
8027 case 7: /* mrs spsr. */
8028 /* Not accessible in user mode. */
8029 if (IS_USER(s
) || IS_M(env
))
8031 tmp
= load_cpu_field(spsr
);
8032 store_reg(s
, rd
, tmp
);
8037 /* Conditional branch. */
8038 op
= (insn
>> 22) & 0xf;
8039 /* Generate a conditional jump to next instruction. */
8040 s
->condlabel
= gen_new_label();
8041 gen_test_cc(op
^ 1, s
->condlabel
);
8044 /* offset[11:1] = insn[10:0] */
8045 offset
= (insn
& 0x7ff) << 1;
8046 /* offset[17:12] = insn[21:16]. */
8047 offset
|= (insn
& 0x003f0000) >> 4;
8048 /* offset[31:20] = insn[26]. */
8049 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
8050 /* offset[18] = insn[13]. */
8051 offset
|= (insn
& (1 << 13)) << 5;
8052 /* offset[19] = insn[11]. */
8053 offset
|= (insn
& (1 << 11)) << 8;
8055 /* jump to the offset */
8056 gen_jmp(s
, s
->pc
+ offset
);
8059 /* Data processing immediate. */
8060 if (insn
& (1 << 25)) {
8061 if (insn
& (1 << 24)) {
8062 if (insn
& (1 << 20))
8064 /* Bitfield/Saturate. */
8065 op
= (insn
>> 21) & 7;
8067 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8070 tcg_gen_movi_i32(tmp
, 0);
8072 tmp
= load_reg(s
, rn
);
8075 case 2: /* Signed bitfield extract. */
8077 if (shift
+ imm
> 32)
8080 gen_sbfx(tmp
, shift
, imm
);
8082 case 6: /* Unsigned bitfield extract. */
8084 if (shift
+ imm
> 32)
8087 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8089 case 3: /* Bitfield insert/clear. */
8092 imm
= imm
+ 1 - shift
;
8094 tmp2
= load_reg(s
, rd
);
8095 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
8101 default: /* Saturate. */
8104 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8106 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8108 tmp2
= tcg_const_i32(imm
);
8111 if ((op
& 1) && shift
== 0)
8112 gen_helper_usat16(tmp
, tmp
, tmp2
);
8114 gen_helper_usat(tmp
, tmp
, tmp2
);
8117 if ((op
& 1) && shift
== 0)
8118 gen_helper_ssat16(tmp
, tmp
, tmp2
);
8120 gen_helper_ssat(tmp
, tmp
, tmp2
);
8122 tcg_temp_free_i32(tmp2
);
8125 store_reg(s
, rd
, tmp
);
8127 imm
= ((insn
& 0x04000000) >> 15)
8128 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8129 if (insn
& (1 << 22)) {
8130 /* 16-bit immediate. */
8131 imm
|= (insn
>> 4) & 0xf000;
8132 if (insn
& (1 << 23)) {
8134 tmp
= load_reg(s
, rd
);
8135 tcg_gen_ext16u_i32(tmp
, tmp
);
8136 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8140 tcg_gen_movi_i32(tmp
, imm
);
8143 /* Add/sub 12-bit immediate. */
8145 offset
= s
->pc
& ~(uint32_t)3;
8146 if (insn
& (1 << 23))
8151 tcg_gen_movi_i32(tmp
, offset
);
8153 tmp
= load_reg(s
, rn
);
8154 if (insn
& (1 << 23))
8155 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8157 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8160 store_reg(s
, rd
, tmp
);
8163 int shifter_out
= 0;
8164 /* modified 12-bit immediate. */
8165 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
8166 imm
= (insn
& 0xff);
8169 /* Nothing to do. */
8171 case 1: /* 00XY00XY */
8174 case 2: /* XY00XY00 */
8178 case 3: /* XYXYXYXY */
8182 default: /* Rotated constant. */
8183 shift
= (shift
<< 1) | (imm
>> 7);
8185 imm
= imm
<< (32 - shift
);
8190 tcg_gen_movi_i32(tmp2
, imm
);
8191 rn
= (insn
>> 16) & 0xf;
8194 tcg_gen_movi_i32(tmp
, 0);
8196 tmp
= load_reg(s
, rn
);
8198 op
= (insn
>> 21) & 0xf;
8199 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
8200 shifter_out
, tmp
, tmp2
))
8203 rd
= (insn
>> 8) & 0xf;
8205 store_reg(s
, rd
, tmp
);
8212 case 12: /* Load/store single data item. */
8217 if ((insn
& 0x01100000) == 0x01000000) {
8218 if (disas_neon_ls_insn(env
, s
, insn
))
8226 /* s->pc has already been incremented by 4. */
8227 imm
= s
->pc
& 0xfffffffc;
8228 if (insn
& (1 << 23))
8229 imm
+= insn
& 0xfff;
8231 imm
-= insn
& 0xfff;
8232 tcg_gen_movi_i32(addr
, imm
);
8234 addr
= load_reg(s
, rn
);
8235 if (insn
& (1 << 23)) {
8236 /* Positive offset. */
8238 tcg_gen_addi_i32(addr
, addr
, imm
);
8240 op
= (insn
>> 8) & 7;
8243 case 0: case 8: /* Shifted Register. */
8244 shift
= (insn
>> 4) & 0xf;
8247 tmp
= load_reg(s
, rm
);
8249 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8250 tcg_gen_add_i32(addr
, addr
, tmp
);
8253 case 4: /* Negative offset. */
8254 tcg_gen_addi_i32(addr
, addr
, -imm
);
8256 case 6: /* User privilege. */
8257 tcg_gen_addi_i32(addr
, addr
, imm
);
8260 case 1: /* Post-decrement. */
8263 case 3: /* Post-increment. */
8267 case 5: /* Pre-decrement. */
8270 case 7: /* Pre-increment. */
8271 tcg_gen_addi_i32(addr
, addr
, imm
);
8279 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
8280 if (insn
& (1 << 20)) {
8282 if (rs
== 15 && op
!= 2) {
8285 /* Memory hint. Implemented as NOP. */
8288 case 0: tmp
= gen_ld8u(addr
, user
); break;
8289 case 4: tmp
= gen_ld8s(addr
, user
); break;
8290 case 1: tmp
= gen_ld16u(addr
, user
); break;
8291 case 5: tmp
= gen_ld16s(addr
, user
); break;
8292 case 2: tmp
= gen_ld32(addr
, user
); break;
8293 default: goto illegal_op
;
8298 store_reg(s
, rs
, tmp
);
8305 tmp
= load_reg(s
, rs
);
8307 case 0: gen_st8(tmp
, addr
, user
); break;
8308 case 1: gen_st16(tmp
, addr
, user
); break;
8309 case 2: gen_st32(tmp
, addr
, user
); break;
8310 default: goto illegal_op
;
8314 tcg_gen_addi_i32(addr
, addr
, imm
);
8316 store_reg(s
, rn
, addr
);
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);

    insn = lduw_code(s->pc);

    switch (insn >> 12) {
8355 op
= (insn
>> 11) & 3;
8358 rn
= (insn
>> 3) & 7;
8359 tmp
= load_reg(s
, rn
);
8360 if (insn
& (1 << 10)) {
8363 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
8366 rm
= (insn
>> 6) & 7;
8367 tmp2
= load_reg(s
, rm
);
8369 if (insn
& (1 << 9)) {
8370 if (s
->condexec_mask
)
8371 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8373 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8375 if (s
->condexec_mask
)
8376 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8378 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8381 store_reg(s
, rd
, tmp
);
8383 /* shift immediate */
8384 rm
= (insn
>> 3) & 7;
8385 shift
= (insn
>> 6) & 0x1f;
8386 tmp
= load_reg(s
, rm
);
8387 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
8388 if (!s
->condexec_mask
)
8390 store_reg(s
, rd
, tmp
);
8394 /* arithmetic large immediate */
8395 op
= (insn
>> 11) & 3;
8396 rd
= (insn
>> 8) & 0x7;
8397 if (op
== 0) { /* mov */
8399 tcg_gen_movi_i32(tmp
, insn
& 0xff);
8400 if (!s
->condexec_mask
)
8402 store_reg(s
, rd
, tmp
);
8404 tmp
= load_reg(s
, rd
);
8406 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
8409 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8414 if (s
->condexec_mask
)
8415 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8417 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8419 store_reg(s
, rd
, tmp
);
8422 if (s
->condexec_mask
)
8423 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8425 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8427 store_reg(s
, rd
, tmp
);
8433 if (insn
& (1 << 11)) {
8434 rd
= (insn
>> 8) & 7;
8435 /* load pc-relative. Bit 1 of PC is ignored. */
8436 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
8437 val
&= ~(uint32_t)2;
8439 tcg_gen_movi_i32(addr
, val
);
8440 tmp
= gen_ld32(addr
, IS_USER(s
));
8442 store_reg(s
, rd
, tmp
);
8445 if (insn
& (1 << 10)) {
8446 /* data processing extended or blx */
8447 rd
= (insn
& 7) | ((insn
>> 4) & 8);
8448 rm
= (insn
>> 3) & 0xf;
8449 op
= (insn
>> 8) & 3;
8452 tmp
= load_reg(s
, rd
);
8453 tmp2
= load_reg(s
, rm
);
8454 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8456 store_reg(s
, rd
, tmp
);
8459 tmp
= load_reg(s
, rd
);
8460 tmp2
= load_reg(s
, rm
);
8461 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8465 case 2: /* mov/cpy */
8466 tmp
= load_reg(s
, rm
);
8467 store_reg(s
, rd
, tmp
);
8469 case 3:/* branch [and link] exchange thumb register */
8470 tmp
= load_reg(s
, rm
);
8471 if (insn
& (1 << 7)) {
8472 val
= (uint32_t)s
->pc
| 1;
8474 tcg_gen_movi_i32(tmp2
, val
);
8475 store_reg(s
, 14, tmp2
);
8483 /* data processing register */
8485 rm
= (insn
>> 3) & 7;
8486 op
= (insn
>> 6) & 0xf;
8487 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
8488 /* the shift/rotate ops want the operands backwards */
8497 if (op
== 9) { /* neg */
8499 tcg_gen_movi_i32(tmp
, 0);
8500 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
8501 tmp
= load_reg(s
, rd
);
8506 tmp2
= load_reg(s
, rm
);
8509 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8510 if (!s
->condexec_mask
)
8514 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8515 if (!s
->condexec_mask
)
8519 if (s
->condexec_mask
) {
8520 gen_helper_shl(tmp2
, tmp2
, tmp
);
8522 gen_helper_shl_cc(tmp2
, tmp2
, tmp
);
8527 if (s
->condexec_mask
) {
8528 gen_helper_shr(tmp2
, tmp2
, tmp
);
8530 gen_helper_shr_cc(tmp2
, tmp2
, tmp
);
8535 if (s
->condexec_mask
) {
8536 gen_helper_sar(tmp2
, tmp2
, tmp
);
8538 gen_helper_sar_cc(tmp2
, tmp2
, tmp
);
8543 if (s
->condexec_mask
)
8546 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
8549 if (s
->condexec_mask
)
8550 gen_sub_carry(tmp
, tmp
, tmp2
);
8552 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
8555 if (s
->condexec_mask
) {
8556 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
8557 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
8559 gen_helper_ror_cc(tmp2
, tmp2
, tmp
);
8564 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8569 if (s
->condexec_mask
)
8570 tcg_gen_neg_i32(tmp
, tmp2
);
8572 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8575 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8579 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8583 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8584 if (!s
->condexec_mask
)
8588 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8589 if (!s
->condexec_mask
)
8593 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8594 if (!s
->condexec_mask
)
8598 tcg_gen_not_i32(tmp2
, tmp2
);
8599 if (!s
->condexec_mask
)
8607 store_reg(s
, rm
, tmp2
);
8611 store_reg(s
, rd
, tmp
);
8621 /* load/store register offset. */
8623 rn
= (insn
>> 3) & 7;
8624 rm
= (insn
>> 6) & 7;
8625 op
= (insn
>> 9) & 7;
8626 addr
= load_reg(s
, rn
);
8627 tmp
= load_reg(s
, rm
);
8628 tcg_gen_add_i32(addr
, addr
, tmp
);
8631 if (op
< 3) /* store */
8632 tmp
= load_reg(s
, rd
);
8636 gen_st32(tmp
, addr
, IS_USER(s
));
8639 gen_st16(tmp
, addr
, IS_USER(s
));
8642 gen_st8(tmp
, addr
, IS_USER(s
));
8645 tmp
= gen_ld8s(addr
, IS_USER(s
));
8648 tmp
= gen_ld32(addr
, IS_USER(s
));
8651 tmp
= gen_ld16u(addr
, IS_USER(s
));
8654 tmp
= gen_ld8u(addr
, IS_USER(s
));
8657 tmp
= gen_ld16s(addr
, IS_USER(s
));
8660 if (op
>= 3) /* load */
8661 store_reg(s
, rd
, tmp
);
        /* load/store word immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);
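        /* Illustrative note (not in the original source): the word offset is
         * imm5 (insn[10:6]) scaled by 4; shifting the insn right by 4 and
         * masking with 0x7c extracts it already multiplied, e.g. imm5 = 3
         * gives val = 12.  The byte and halfword forms below use the same
         * trick with scale factors 1 and 2. */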
        if (insn & (1 << 11)) {
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        /* load/store byte immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        /* load/store halfword immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);
        if (insn & (1 << 11)) {
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend. */
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
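            /* Illustrative note (not in the original source): bit 11 selects
             * POP (load) versus PUSH (store), bit 8 adds LR to a PUSH or PC
             * to a POP, and offset is 4 bytes per transferred register.
             * (insn & 0x0900) == 0x0900 is the POP-with-PC case, where the
             * value just loaded becomes the new PC. */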
        case 1: case 3: case 9: case 11: /* czb */
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
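            /* Illustrative note (not in the original source): this is
             * CBZ/CBNZ.  Bit 11 distinguishes CBNZ (branch taken when the
             * register is non-zero, so the skip happens on EQ) from CBZ.
             * The branch offset is (i:imm5) * 2: insn[7:3] shifted right by
             * 2 gives imm5 * 2, and insn[9] shifted right by 3 lands at bit
             * 6.  val is the insn address + 4, the Thumb PC base for the
             * target. */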
        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state. */
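            /* Illustrative note (not in the original source): condexec_cond
             * keeps the IT firstcond with its low bit cleared, and
             * condexec_mask keeps firstcond[0] together with the 4-bit IT
             * mask.  As each insn in the block is translated, the mask is
             * shifted left one bit and its top bit supplies the low
             * condition bit, so "then" slots use firstcond and "else" slots
             * use firstcond ^ 1. */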
        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;

            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
            tmp = tcg_const_i32((insn & (1 << 4)) != 0);
            addr = tcg_const_i32(16);
            gen_helper_v7m_msr(cpu_env, addr, tmp);
            tcg_temp_free_i32(addr);
            addr = tcg_const_i32(17);
            gen_helper_v7m_msr(cpu_env, addr, tmp);
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
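            /* Illustrative note (not in the original source): this is the
             * M-profile (v7M) path for CPS; the constants 16 and 17 are the
             * special-register numbers passed to the v7m_msr helper.  The
             * gen_set_psr_im call below is the A/R-profile path, which
             * updates the CPSR A/I/F bits directly. */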
            if (insn & (1 << 4))
                shift = CPSR_A | CPSR_I | CPSR_F;
            else
                shift = 0;
            gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback. */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        }
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
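        /* Illustrative note (not in the original source): the 8-bit branch
         * offset is sign-extended by shifting it to the top of a 32-bit
         * value and back (e.g. 0x80 becomes -128), and the target is the
         * insn address + 4 + offset * 2; val already holds the insn address
         * + 4 because s->pc points past this 16-bit insn.  The 11-bit
         * unconditional branch below uses the same construction with
         * << 21 >> 21. */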
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;

        if (disas_thumb2_insn(env, s, insn))

    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;

    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    dc->is_jmp = DISAS_NEXT;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
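    /* Illustrative note (not in the original source): the CPU keeps the IT
     * state packed in condexec_bits; the low 4 bits are the remaining mask
     * (shifted left by one here so the per-insn advance in the loop below
     * works uniformly) and the upper bits are the base condition. */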
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    goto done_generating;
                }
            }
        }
        j = gen_opc_ptr - gen_opc_buf;
        gen_opc_instr_start[lj++] = 0;
        gen_opc_pc[lj] = dc->pc;
        gen_opc_instr_start[lj] = 1;
        gen_opc_icount[lj] = num_insns;

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI) {
            gen_exception(EXCP_SWI);
        } else {
            gen_exception(EXCP_DEBUG);
        }
        gen_set_label(dc->condlabel);
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
            gen_goto_tb(dc, 1, dc->pc);
            /* indicate that the hash table must be used to find the next TB */
            /* nothing more to generate */
            gen_exception(EXCP_SWI);
        }
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_goto_tb(dc, 1, dc->pc);
    }
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
    }

    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
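    /* Illustrative note (not in the original source): CPSR bit 4 is the
     * M[4] mode bit; it is set for all the 32-bit ARM modes, so the dump
     * prints e.g. "usr32" or "svc32", and would print 26 only for the
     * legacy 26-bit modes. */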
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}