/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
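
/* Added usage note (not in the original): ARCH() is written for the big
   decode functions later in this file, each of which provides an
   illegal_op label; a decoder arm for a v6T2-only encoding would begin: */
#if 0
        ARCH(6T2);   /* UNDEFs (goto illegal_op) on pre-Thumb-2 cores */
#endif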
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    uint32_t condexec_mask;
    uint32_t condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
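
/* Added sketch (not original code): the typical read-modify-write pattern
   built from the two helpers above, using the new_tmp()/dead_tmp()
   temporary discipline followed throughout this file.  */
#if 0
        TCGv tmp = load_reg(s, rn);      /* allocates a temporary */
        tcg_gen_addi_i32(tmp, tmp, 4);
        store_reg(s, rd, tmp);           /* marks the temporary dead */
#endif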
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
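
/* Added worked example: with width == 8, signbit == 0x80.  A field value
   of 0xff becomes (0xff ^ 0x80) - 0x80 = -1, i.e. the xor/sub pair
   sign-extends the masked field without a variable-width sign-extend op.  */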
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
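
/* Added sketch (not original code): plain-C model of gen_bfi.  */
#if 0
static uint32_t bfi_model(uint32_t base, uint32_t val, int shift,
                          uint32_t mask)
{
    return (base & ~(mask << shift)) | ((val & mask) << shift);
}
#endif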
/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
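
/* Added sketch: plain-C model of the carry-blocking trick above.  Clearing
   bit 15 of both operands stops a carry from crossing into the upper
   halfword; the true bit-15 sum is then patched back in from the saved
   xor, so each halfword adds independently.  */
#if 0
static uint32_t add16_model(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    return (t0 + t1) ^ tmp;
}
#endif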
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
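
/* Added note: for shiftop 3 an immediate of 0 encodes RRX rather than
   ROR #0, which is why the else branch above shifts the old carry flag
   into bit 31 instead of rotating.  */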
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
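
/* Added usage note: conditional instructions are translated by branching
   over their body when the condition fails, e.g. (sketch, field names as
   in DisasContext above):
       s->condlabel = gen_new_label();
       gen_test_cc(cond ^ 1, s->condlabel);
       s->condjmp = 1;
   with gen_set_label(s->condlabel) emitted after the body.  */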
static const uint8_t table_logic_cc[16] = {
    1, 1, 0, 0, 0, 0, 0, 0, /* and, xor, sub, rsb, add, adc, sbc, rsc */
    1, 1, 0, 0, 1, 1, 1, 1, /* tst, teq, cmp, cmn, orr, mov, bic, mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a
   temporary and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
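
/* Added sketch: a typical guest load/store pair.  IS_USER(s) selects the
   MMU index, so user-mode translation blocks never use the privileged
   mapping.  */
#if 0
        tmp = gen_ld32(addr, IS_USER(s));   /* returns a fresh temporary */
        /* ... operate on tmp ... */
        gen_st32(tmp, addr, IS_USER(s));    /* consumes (frees) tmp */
#endif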
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
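
/* Added example: single-precision registers overlay the double bank, so
   with this layout s4 maps to the low word of d2 and s5 to the high word;
   CPU_DoubleU hides the host endianness of the two halves.  */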
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        dead_tmp(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        default:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8)
                tcg_gen_ext8s_i32(tmp, tmp);
            else
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8)
                tcg_gen_ext16s_i32(tmp, tmp);
            else
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            dead_tmp(tmp2);
            dead_tmp(tmp);
            return 1;
        }
        dead_tmp(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error
   occurred (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        dead_tmp(tmp2);
        dead_tmp(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble a system coprocessor instruction.  Return nonzero if the
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            dead_tmp(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble a system coprocessor (cp15) instruction.  Return nonzero if
   the instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then this sets the condition
           codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
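
/* Worked decode example (illustrative, not from the original source):
 * with bigbit = 12 and smallbit = 22, an instruction with bits [15:12]
 * equal to 0x5 and bit 22 set gives, on a VFP3 core,
 *     reg = 0x5 | 0x10 = 21        (double-precision register d21)
 * while VFP_SREG packs the same fields the other way round,
 *     (0x5 << 1) | 1 = 11          (single-precision register s11).
 */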
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}

static inline int
vfp_enabled(CPUState * env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
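
/* Worked example (illustrative): gen_neon_dup_u8 with var = 0x000000ab
 * and shift = 0 first builds 0x0000abab (byte or'ed with itself shifted
 * left by 8) and then 0xabababab, i.e. the byte replicated into all four
 * lanes.  gen_neon_dup_low16 turns 0x0000beef into 0xbeefbeef the same
 * way.
 */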
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = new_tmp();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            dead_tmp(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            dead_tmp(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = new_tmp();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        dead_tmp(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            dead_tmp(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;
            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 4: /* vcvtb.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        dead_tmp(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        dead_tmp(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = new_tmp();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = new_tmp();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC-relative: PC is word-aligned for the address.  */
                addr = new_tmp();
                tcg_gen_movi_i32(addr, s->pc & ~2);
            } else {
                addr = load_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                dead_tmp(addr);
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
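
/* Note on TB chaining (explanatory sketch, not part of the original
 * source): when the destination stays on the same guest page as the
 * current TB, the direct-jump slot n (0 or 1) can be patched later to
 * chain TBs, e.g.
 *     gen_goto_tb(s, 0, s->pc);           // fall-through successor
 *     gen_goto_tb(s, 1, s->pc + offset);  // taken-branch successor
 * Crossing a page boundary falls back to setting the PC and exiting
 * with tcg_gen_exit_tb(0) so the new page is looked up afresh.
 */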
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
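
/* Example (illustrative): x selects the top half of t0 and y the bottom
 * half of t1, so with t0 = 0x12340000 and t1 = 0x00005678,
 *     gen_mulxy(t0, t1, 1, 0)
 * multiplies the sign-extended halfwords 0x1234 * 0x5678 and leaves the
 * 32-bit product in t0, matching the SMULxy-style half-register
 * multiplies.
 */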
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
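
/* Example (illustrative): an MSR with the "cf" field specifier sets
 * flags = (1 << 3) | (1 << 0), so before the feature masking above the
 * mask covers the flags byte and the control byte:
 *     mask = 0xff000000 | 0x000000ff
 * The IS_USER() clamp then strips the privileged control bits again in
 * user mode.
 */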
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
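
/* Expansion example (illustrative): inside disas_neon_data_insn, where
 * "size", "u", "tmp" and "tmp2" are in scope, GEN_NEON_INTEGER_OP(hadd)
 * with size == 1 and u == 0 selects case 2 and emits
 *     gen_helper_neon_hadd_s16(tmp, tmp, tmp2);
 * i.e. the (size << 1) | u index picks the signed/unsigned 8/16/32-bit
 * helper variant.
 */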
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    dead_tmp(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg >> 1, reg & 1);
    } else {
        tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
        if (reg & 1) {
            gen_neon_dup_low16(tmp);
        } else {
            gen_neon_dup_high16(tmp);
        }
    }
    return tmp;
}
static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(rm, t0, 8);
    tcg_gen_andi_i32(rm, rm, 0xff);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_andi_i32(tmp, t1, 0xff000000);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}
static void gen_neon_zip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 24);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_andi_i32(rm, t1, 0xff000000);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shri_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}
static void gen_neon_zip_u16(TCGv t0, TCGv t1)
{
    TCGv tmp, tmp2;

    tmp = new_tmp();
    tmp2 = new_tmp();

    tcg_gen_andi_i32(tmp, t0, 0xffff);
    tcg_gen_shli_i32(tmp2, t1, 16);
    tcg_gen_or_i32(tmp, tmp, tmp2);
    tcg_gen_andi_i32(t1, t1, 0xffff0000);
    tcg_gen_shri_i32(tmp2, t0, 16);
    tcg_gen_or_i32(t1, t1, tmp2);
    tcg_gen_mov_i32(t0, tmp);

    dead_tmp(tmp2);
    dead_tmp(tmp);
}
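
/* Worked example (illustrative): with t0 holding the 16-bit elements
 * {a1, a0} and t1 holding {b1, b0} (high lane listed first),
 * gen_neon_zip_u16 leaves t0 = {b0, a0} and t1 = {b1, a1}: the low
 * lanes interleaved in t0, the high lanes interleaved in t1.
 */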
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;
    TCGv t0, t1;

    for (n = 0; n < q + 1; n += 2) {
        t0 = neon_load_reg(reg, n);
        t1 = neon_load_reg(reg, n + 1);
        switch (size) {
        case 0: gen_neon_unzip_u8(t0, t1); break;
        case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        neon_store_scratch(tmp + n, t0);
        neon_store_scratch(tmp + n + 1, t1);
    }
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
/* Each entry is nregs, interleave, spacing.  */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op, nregs, interleave, spacing, stride;
    int size, reg, pass, load, shift, n;
    TCGv addr, tmp, tmp2;
    TCGv_i64 tmp64;

    if (!vfp_enabled(env))
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    addr = new_tmp();
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
                            dead_tmp(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = new_tmp();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
                                    dead_tmp(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = new_tmp();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            dead_tmp(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    tmp = gen_ld8u(addr, IS_USER(s));
                    gen_neon_dup_u8(tmp, 0);
                    break;
                case 1:
                    tmp = gen_ld16u(addr, IS_USER(s));
                    gen_neon_dup_low16(tmp);
                    break;
                case 2:
                    tmp = gen_ld32(addr, IS_USER(s));
                    break;
                case 3:
                    return 1;
                default: /* Avoid compiler warnings.  */
                    abort();
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                tmp2 = new_tmp();
                tcg_gen_mov_i32(tmp2, tmp);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        dead_tmp(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
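
/* Example (illustrative): gen_neon_bsl computes dest = (t & c) | (f & ~c)
 * bit by bit, so with c = 0x0000ffff, t = 0x11112222 and f = 0x33334444
 * the result is 0x33332222: f supplies the bits where the control mask
 * is clear, t where it is set.
 */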
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
}
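
/* Mapping example (illustrative): gen_neon_mull indexes its helpers by
 * (size << 1) | u, so a signed 16x16->32 multiply (size 1, u 0) takes
 * case 2, while the 32x32->64 cases (size 2) fall through to
 * gen_muls_i64_i32/gen_mulu_i64_i32 instead of a helper call.
 */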
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int n;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!vfp_enabled(env))
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
                          || op == 10 || op == 11 || op == 16)) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case 1: /* VQADD */
                    if (u) {
                        gen_helper_neon_add_saturate_u64(CPU_V001);
                    } else {
                        gen_helper_neon_add_saturate_s64(CPU_V001);
                    }
                    break;
                case 5: /* VQSUB */
                    if (u) {
                        gen_helper_neon_sub_saturate_u64(CPU_V001);
                    } else {
                        gen_helper_neon_sub_saturate_s64(CPU_V001);
                    }
                    break;
                case 8: /* VSHL */
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case 9: /* VQSHL */
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case 10: /* VRSHL */
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case 11: /* VQRSHL */
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case 16:
                    if (u) { /* VSUB */
                        tcg_gen_sub_i64(CPU_V001);
                    } else { /* VADD */
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        switch (op) {
        case 8: /* VSHL */
        case 9: /* VQSHL */
        case 10: /* VRSHL */
        case 11: /* VQRSHL */
            {
                /* Shift instruction operands are reversed.  */
                int rtmp = rn;
                rn = rm;
                rm = rtmp;
                pairwise = 0;
            }
            break;
        case 20: /* VPMAX */
        case 21: /* VPMIN */
        case 23: /* VPADD */
            pairwise = 1;
            break;
        case 26: /* VPADD (float) */
            pairwise = (u && size < 2);
            break;
        case 30: /* VPMIN/VPMAX (float) */
            pairwise = u;
            break;
        default:
            pairwise = 0;
            break;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (pairwise) {
                /* Pairwise.  */
                if (q)
                    n = (pass & 1) * 2;
                else
                    n = 0;
                if (pass < q + 1) {
                    tmp = neon_load_reg(rn, n);
                    tmp2 = neon_load_reg(rn, n + 1);
                } else {
                    tmp = neon_load_reg(rm, n);
                    tmp2 = neon_load_reg(rm, n + 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case 0: /* VHADD */
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case 1: /* VQADD */
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case 2: /* VRHADD */
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case 3: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    tcg_gen_and_i32(tmp, tmp, tmp2);
                    break;
                case 1: /* BIC */
                    tcg_gen_andc_i32(tmp, tmp, tmp2);
                    break;
                case 2: /* VORR */
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    break;
                case 3: /* VORN */
                    tcg_gen_orc_i32(tmp, tmp, tmp2);
                    break;
                case 4: /* VEOR */
                    tcg_gen_xor_i32(tmp, tmp, tmp2);
                    break;
                case 5: /* VBSL */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                    dead_tmp(tmp3);
                    break;
                case 6: /* VBIT */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                    dead_tmp(tmp3);
                    break;
                case 7: /* VBIF */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                    dead_tmp(tmp3);
                    break;
                }
                break;
            case 4: /* VHSUB */
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case 5: /* VQSUB */
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case 6: /* VCGT */
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case 7: /* VCGE */
                GEN_NEON_INTEGER_OP(cge);
                break;
            case 8: /* VSHL */
                GEN_NEON_INTEGER_OP(shl);
                break;
            case 9: /* VQSHL */
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case 10: /* VRSHL */
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case 11: /* VQRSHL */
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case 12: /* VMAX */
                GEN_NEON_INTEGER_OP(max);
                break;
            case 13: /* VMIN */
                GEN_NEON_INTEGER_OP(min);
                break;
            case 14: /* VABD */
                GEN_NEON_INTEGER_OP(abd);
                break;
            case 15: /* VABA */
                GEN_NEON_INTEGER_OP(abd);
                dead_tmp(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case 16:
                if (!u) { /* VADD */
                    if (gen_neon_add(size, tmp, tmp2))
                        return 1;
                } else { /* VSUB */
                    switch (size) {
                    case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 17:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 18: /* Multiply.  */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
                dead_tmp(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size, tmp, tmp2);
                } else { /* VMLA */
                    gen_neon_add(size, tmp, tmp2);
                }
                break;
            case 19: /* VMUL */
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 20: /* VPMAX */
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case 21: /* VPMIN */
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case 22: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
                    case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                    default: return 1;
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
                    case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 23: /* VPADD */
                if (u)
                    return 1;
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
                break;
            case 26: /* Floating point arithmetic.  */
                switch ((u << 2) | size) {
                case 0: /* VADD */
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                    break;
                case 2: /* VSUB */
                    gen_helper_neon_sub_f32(tmp, tmp, tmp2);
                    break;
                case 4: /* VPADD */
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2);
                    break;
                default:
                    return 1;
                }
                break;
            case 27: /* Float multiply.  */
                gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                if (!u) {
                    dead_tmp(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_neon_add_f32(tmp, tmp, tmp2);
                    } else {
                        gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                    }
                }
                break;
            case 28: /* Float compare.  */
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                } else {
                    if (size == 0)
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                    else
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                }
                break;
            case 29: /* Float compare absolute.  */
                if (!u)
                    return 1;
                if (size == 0)
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
                break;
            case 30: /* Float min/max.  */
                if (size == 0)
                    gen_helper_neon_max_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_min_f32(tmp, tmp, tmp2);
                break;
            case 31:
                if (size == 0)
                    gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                else
                    gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                break;
            default:
                abort();
            }
            dead_tmp(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands.  */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }
        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.   */
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHL */
                            if (u)
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHLU */
                            gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            cpu_abort(env, "VS[LR]I.64 not implemented");
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = new_tmp();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            break;
                        case 6: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        case 7: /* VQSHLU */
                            switch (size) {
                            case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
                            case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
                            case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
                            default: return 1;
                            }
                            break;
                        }
                        dead_tmp(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp2, tmp);
                            dead_tmp(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                shift = shift - (1 << (size + 3));
                size++;
                switch (size) {
                case 1:
                    imm = (uint16_t)shift;
                    imm |= imm << 16;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 2:
                    imm = (uint32_t)shift;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 3:
                    tmp64 = tcg_const_i64(shift);
                    TCGV_UNUSED(tmp2);
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < 2; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        if (q) {
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
                        } else {
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
                        }
                    } else {
                        tmp = neon_load_reg(rm + pass, 0);
                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
                        tmp3 = neon_load_reg(rm + pass, 1);
                        gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        dead_tmp(tmp);
                        dead_tmp(tmp3);
                    }
                    tmp = new_tmp();
                    if (op == 8 && !u) {
                        gen_neon_narrow(size - 1, tmp, cpu_V0);
                    } else {
                        if (op == 8)
                            gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
                        else
                            gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
                    }
                    neon_store_reg(rd, pass, tmp);
                } /* for pass */
                if (size == 3) {
                    tcg_temp_free_i64(tmp64);
                } else {
                    dead_tmp(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else {
                                imm = 0xffff >> (16 - shift);
                            }
                            imm64 = imm | (((uint64_t)imm) << 32);
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op == 15 || op == 16) {
                /* VCVT fixed-point.  */
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (op & 1) {
                        if (u)
                            gen_vfp_ulto(0, shift);
                        else
                            gen_vfp_slto(0, shift);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift);
                        else
                            gen_vfp_tosl(0, shift);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = new_tmp();
                    if (op == 14 && invert) {
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* prewiden, src1_wide, src2_wide */
                static const int neon_3reg_wide[16][3] = {
                    {1, 0, 0}, /* VADDL */
                    {1, 1, 0}, /* VADDW */
                    {1, 0, 0}, /* VSUBL */
                    {1, 1, 0}, /* VSUBW */
                    {0, 1, 1}, /* VADDHN */
                    {0, 0, 0}, /* VABAL */
                    {0, 1, 1}, /* VSUBHN */
                    {0, 0, 0}, /* VABDL */
                    {0, 0, 0}, /* VMLAL */
                    {0, 0, 0}, /* VQDMLAL */
                    {0, 0, 0}, /* VMLSL */
                    {0, 0, 0}, /* VQDMLSL */
                    {0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0}, /* VQDMULL */
                    {0, 0, 0}  /* Polynomial VMULL */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];

                if (size == 0 && (op == 9 || op == 11 || op == 13))
                    return 1;

                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                TCGV_UNUSED(tmp3);
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        TCGV_UNUSED(tmp);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        TCGV_UNUSED(tmp2);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    case 14: /* Polynomial VMULL */
                        cpu_abort(env, "Polynomial VMULL not implemented");
                    default: /* 15 is RESERVED.  */
                        return 1;
                    }
                    if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        if (op == 10 || op == 11) {
                            gen_neon_negl(cpu_V0, size);
                        }
                        if (op != 13) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 13: /* VQDMULL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = new_tmp();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  */
                switch (op) {
                case 0: /* Integer VMLA scalar */
                case 1: /* Float VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 5: /* Floating point VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 9: /* Floating point VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                        }
                        dead_tmp(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                                break;
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                                gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                break;
                            default:
                                abort();
                            }
                            dead_tmp(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 2: /* VMLAL scalar */
                case 3: /* VQDMLAL scalar */
                case 6: /* VMLSL scalar */
                case 7: /* VQDMLSL scalar */
                case 10: /* VMULL scalar */
                case 11: /* VQDMULL scalar */
                    if (size == 0 && (op == 3 || op == 7 || op == 11))
                        return 1;

                    tmp2 = neon_get_scalar(size, rm);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        dead_tmp(tmp);
                        if (op == 6 || op == 7) {
                            gen_neon_negl(cpu_V0, size);
                        }
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 2: case 6:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    dead_tmp(tmp2);
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;
                if (imm > 7 && !q)
                    return 1;

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
        } else if ((insn & (1 << 11)) == 0) {
            /* Two register misc.  */
            op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
            size = (insn >> 18) & 3;
            switch (op) {
            case 0: /* VREV64 */
                if (size == 3)
                    return 1;
                for (pass = 0; pass < (q ? 2 : 1); pass++) {
                    tmp = neon_load_reg(rm, pass * 2);
                    tmp2 = neon_load_reg(rm, pass * 2 + 1);
                    switch (size) {
                    case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                    case 1: gen_swap_half(tmp); break;
                    case 2: /* no-op */ break;
                    default: abort();
                    }
                    neon_store_reg(rd, pass * 2 + 1, tmp);
                    if (size == 2) {
                        neon_store_reg(rd, pass * 2, tmp2);
                    } else {
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                        case 1: gen_swap_half(tmp2); break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2, tmp2);
                    }
                }
                break;
            case 4: case 5: /* VPADDL */
            case 12: case 13: /* VPADAL */
                if (size == 3)
                    return 1;
                for (pass = 0; pass < q + 1; pass++) {
                    tmp = neon_load_reg(rm, pass * 2);
                    gen_neon_widen(cpu_V0, tmp, size, op & 1);
                    tmp = neon_load_reg(rm, pass * 2 + 1);
                    gen_neon_widen(cpu_V1, tmp, size, op & 1);
                    switch (size) {
                    case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                    case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                    case 2: tcg_gen_add_i64(CPU_V001); break;
                    default: abort();
                    }
                    if (op >= 12) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        gen_neon_addl(size);
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
                break;
            case 33: /* VTRN */
                if (size == 2) {
                    for (n = 0; n < (q ? 4 : 2); n += 2) {
                        tmp = neon_load_reg(rm, n);
                        tmp2 = neon_load_reg(rd, n + 1);
                        neon_store_reg(rm, n, tmp2);
                        neon_store_reg(rd, n + 1, tmp);
                    }
                } else {
                    goto elementwise;
                }
                break;
            case 34: /* VUZP */
                /* Reg  Before       After
                   Rd   A3 A2 A1 A0  B2 B0 A2 A0
                   Rm   B3 B2 B1 B0  B3 B1 A3 A1
                 */
                if (size == 3)
                    return 1;
                gen_neon_unzip(rd, q, 0, size);
                gen_neon_unzip(rm, q, 4, size);
                if (q) {
                    static int unzip_order_q[8] =
                        {0, 2, 4, 6, 1, 3, 5, 7};
                    for (n = 0; n < 8; n++) {
                        int reg = (n < 4) ? rd : rm;
                        tmp = neon_load_scratch(unzip_order_q[n]);
                        neon_store_reg(reg, n % 4, tmp);
                    }
                } else {
                    static int unzip_order[4] =
                        {0, 4, 1, 5};
                    for (n = 0; n < 4; n++) {
                        int reg = (n < 2) ? rd : rm;
                        tmp = neon_load_scratch(unzip_order[n]);
                        neon_store_reg(reg, n % 2, tmp);
                    }
                }
                break;
            case 35: /* VZIP */
                /* Reg  Before       After
                   Rd   A3 A2 A1 A0  B1 A1 B0 A0
                   Rm   B3 B2 B1 B0  B3 A3 B2 A2
                 */
                if (size == 3)
                    return 1;
                count = (q ? 4 : 2);
                for (n = 0; n < count; n++) {
                    tmp = neon_load_reg(rd, n);
                    tmp2 = neon_load_reg(rm, n);
                    switch (size) {
                    case 0: gen_neon_zip_u8(tmp, tmp2); break;
                    case 1: gen_neon_zip_u16(tmp, tmp2); break;
                    case 2: /* no-op */; break;
                    default: abort();
                    }
                    neon_store_scratch(n * 2, tmp);
                    neon_store_scratch(n * 2 + 1, tmp2);
                }
                for (n = 0; n < count * 2; n++) {
                    int reg = (n < count) ? rd : rm;
                    tmp = neon_load_scratch(n);
                    neon_store_reg(reg, n % count, tmp);
                }
                break;
            case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
                if (size == 3)
                    return 1;
                TCGV_UNUSED(tmp2);
                for (pass = 0; pass < 2; pass++) {
                    neon_load_reg64(cpu_V0, rm + pass);
                    tmp = new_tmp();
                    if (op == 36 && q == 0) {
                        gen_neon_narrow(size, tmp, cpu_V0);
                    } else if (q) {
                        gen_neon_narrow_satu(size, tmp, cpu_V0);
                    } else {
                        gen_neon_narrow_sats(size, tmp, cpu_V0);
                    }
                    if (pass == 0) {
                        tmp2 = tmp;
                    } else {
                        neon_store_reg(rd, 0, tmp2);
                        neon_store_reg(rd, 1, tmp);
                    }
                }
                break;
            case 38: /* VSHLL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;
                    gen_neon_widen(cpu_V0, tmp, size, 1);
                    tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                    neon_store_reg64(cpu_V0, rd + pass);
                }
                break;
            case 44: /* VCVT.F16.F32 */
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                    return 1;
                tmp = new_tmp();
                tmp2 = new_tmp();
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                neon_store_reg(rd, 0, tmp2);
                tmp2 = new_tmp();
                gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                neon_store_reg(rd, 1, tmp2);
                dead_tmp(tmp);
                break;
            case 46: /* VCVT.F32.F16 */
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                    return 1;
                tmp3 = new_tmp();
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                tcg_gen_ext16u_i32(tmp3, tmp);
                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_shri_i32(tmp3, tmp, 16);
                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                dead_tmp(tmp);
                tcg_gen_ext16u_i32(tmp3, tmp2);
                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                tcg_gen_shri_i32(tmp3, tmp2, 16);
                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                dead_tmp(tmp2);
                dead_tmp(tmp3);
                break;
            default:
            elementwise:
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    if (op == 30 || op == 31 || op >= 58) {
                        tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                       neon_reg_offset(rm, pass));
                        TCGV_UNUSED(tmp);
                    } else {
                        tmp = neon_load_reg(rm, pass);
                    }
                    switch (op) {
                    case 1: /* VREV32 */
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        default: return 1;
                        }
                        break;
                    case 2: /* VREV16 */
                        if (size != 0)
                            return 1;
                        gen_rev16(tmp);
                        break;
                    case 8: /* CLS */
                        switch (size) {
                        case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                        case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 9: /* CLZ */
                        switch (size) {
                        case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                        case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                        case 2: gen_helper_clz(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 10: /* CNT */
                        if (size != 0)
                            return 1;
                        gen_helper_neon_cnt_u8(tmp, tmp);
                        break;
                    case 11: /* VNOT */
                        if (size != 0)
                            return 1;
                        tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 14: /* VQABS */
                        switch (size) {
                        case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
                        case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
                        case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 15: /* VQNEG */
                        switch (size) {
                        case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
                        case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
                        case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 16: case 19: /* VCGT #0, VCLE #0 */
                        tmp2 = tcg_const_i32(0);
                        switch (size) {
                        case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                        default: return 1;
                        }
                        tcg_temp_free(tmp2);
                        if (op == 19)
                            tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 17: case 20: /* VCGE #0, VCLT #0 */
                        tmp2 = tcg_const_i32(0);
                        switch (size) {
                        case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                        default: return 1;
                        }
                        tcg_temp_free(tmp2);
                        if (op == 20)
                            tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 18: /* VCEQ #0 */
                        tmp2 = tcg_const_i32(0);
                        switch (size) {
                        case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                        default: return 1;
                        }
                        tcg_temp_free(tmp2);
                        break;
                    case 22: /* VABS */
                        switch (size) {
                        case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                        case 2: tcg_gen_abs_i32(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 23: /* VNEG */
                        if (size == 3)
                            return 1;
                        tmp2 = tcg_const_i32(0);
                        gen_neon_rsb(size, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        break;
                    case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        if (op == 27)
                            tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        if (op == 28)
                            tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 26: /* Float VCEQ #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        break;
                    case 30: /* Float VABS */
                        gen_vfp_abs(0);
                        break;
                    case 31: /* Float VNEG */
                        gen_vfp_neg(0);
                        break;
                    case 32: /* VSWP */
                        tmp2 = neon_load_reg(rd, pass);
                        neon_store_reg(rm, pass, tmp2);
                        break;
                    case 33: /* VTRN */
                        tmp2 = neon_load_reg(rd, pass);
                        switch (size) {
                        case 0: gen_neon_trn_u8(tmp, tmp2); break;
                        case 1: gen_neon_trn_u16(tmp, tmp2); break;
                        default: return 1;
                        }
                        neon_store_reg(rm, pass, tmp2);
                        break;
                    case 56: /* Integer VRECPE */
                        gen_helper_recpe_u32(tmp, tmp, cpu_env);
                        break;
                    case 57: /* Integer VRSQRTE */
                        gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                        break;
                    case 58: /* Float VRECPE */
                        gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                        break;
                    case 59: /* Float VRSQRTE */
                        gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                        break;
                    case 60: /* VCVT.F32.S32 */
                        gen_vfp_sito(0);
                        break;
                    case 61: /* VCVT.F32.U32 */
                        gen_vfp_uito(0);
                        break;
                    case 62: /* VCVT.S32.F32 */
                        gen_vfp_tosiz(0);
                        break;
                    case 63: /* VCVT.U32.F32 */
                        gen_vfp_touiz(0);
                        break;
                    default:
                        /* Reserved: 21, 29, 39-56 */
                        return 1;
                    }
                    if (op == 30 || op == 31 || op >= 58) {
                        tcg_gen_st_f32(cpu_F0s, cpu_env,
                                       neon_reg_offset(rd, pass));
                    } else {
                        neon_store_reg(rd, pass, tmp);
                    }
                }
                break;
            }
} else if ((insn & (1 << 10)) == 0) {
    n = ((insn >> 5) & 0x18) + 8;
    if (insn & (1 << 6)) {
        tmp = neon_load_reg(rd, 0);
    tcg_gen_movi_i32(tmp, 0);
    tmp2 = neon_load_reg(rm, 0);
    tmp4 = tcg_const_i32(rn);
    tmp5 = tcg_const_i32(n);
    gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
    if (insn & (1 << 6)) {
        tmp = neon_load_reg(rd, 1);
    tcg_gen_movi_i32(tmp, 0);
    tmp3 = neon_load_reg(rm, 1);
    gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
    tcg_temp_free_i32(tmp5);
    tcg_temp_free_i32(tmp4);
    neon_store_reg(rd, 0, tmp2);
    neon_store_reg(rd, 1, tmp3);
} else if ((insn & 0x380) == 0) {
    if (insn & (1 << 19)) {
        tmp = neon_load_reg(rm, 1);
    tmp = neon_load_reg(rm, 0);
    if (insn & (1 << 16)) {
        gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
    } else if (insn & (1 << 17)) {
        if ((insn >> 18) & 1)
            gen_neon_dup_high16(tmp);
        gen_neon_dup_low16(tmp);
    for (pass = 0; pass < (q ? 4 : 2); pass++) {
        tcg_gen_mov_i32(tmp2, tmp);
        neon_store_reg(rd, pass, tmp2);
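/* NB on the scalar VDUP decode above: bit 19 selects which 32-bit half
 * of Dm holds the scalar; bit 16 set means a byte dup with the lane
 * taken from bits [18:17], otherwise bit 17 set means a halfword dup
 * with bit 18 picking the high or low half. The per-pass loop then
 * copies the replicated value into every element of the destination. */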
static int disas_cp14_read(CPUState *env, DisasContext *s, uint32_t insn)
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
static int disas_cp14_write(CPUState *env, DisasContext *s, uint32_t insn)
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
static int disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return disas_iwmmxt_insn(env, s, insn);
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        return disas_dsp_insn(env, s, insn);
    return disas_vfp_insn(env, s, insn);
    /* Coprocessors 7-15 are architecturally reserved by ARM.
       Unfortunately Intel decided to ignore this. */
    if (arm_feature(env, ARM_FEATURE_XSCALE))
    if (insn & (1 << 20))
        return disas_cp14_read(env, s, insn);
    return disas_cp14_write(env, s, insn);
    return disas_cp15_insn(env, s, insn);
    /* Unknown coprocessor. See if the board has hooked it. */
    return disas_cp_insn(env, s, insn);
/* Store a 64-bit value to a register pair. Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
    /* Load value and extend to 64 bits. */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
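/* NB: gen_addq() is the accumulate step of the long multiplies (UMLAL,
 * SMLAL and friends): it rebuilds RdHi:RdLo as a single i64 with
 * tcg_gen_concat_i32_i64(), adds it to the product, and the caller
 * splits the sum back into the register pair with gen_storeq_reg(). */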
/* Set N and Z flags from a 64-bit value. */
static void gen_logicq_cc(TCGv_i64 val)
    TCGv tmp = new_tmp();
    gen_helper_logicq_cc(tmp, val);
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    tcg_gen_addi_i32(addr, addr, 4);
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_high, tmp);
    store_reg(s, rt2, tmp);
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);

static void gen_clrex(DisasContext *s)
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
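/* NB: the exclusive-monitor emulation reduced to pseudo-C (a sketch
 * under the single-CPU assumption documented above; exclusive_addr,
 * exclusive_val and exclusive_high are the TCG globals declared near
 * the top of this file):
 *
 *   LDREX:  exclusive_addr = addr; exclusive_val = *addr; Rt = *addr;
 *   STREX:  if (exclusive_addr == addr && exclusive_val == *addr) {
 *               *addr = Rt; Rd = 0;
 *           } else {
 *               Rd = 1;
 *           }
 *           exclusive_addr = -1;
 *   CLREX:  exclusive_addr = -1;   (gen_clrex() above emits just this)
 */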
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_STREX);
    s->is_jmp = DISAS_JUMP;
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {rt};
         {rd} = 0;
       } else {
         {rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    TCGv tmp2 = new_tmp();
    tcg_gen_addi_i32(tmp2, addr, 4);
    tmp = gen_ld32(tmp2, IS_USER(s)); /* high word lives at addr + 4 */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
    tmp = load_reg(s, rt);
    gen_st8(tmp, addr, IS_USER(s));
    gen_st16(tmp, addr, IS_USER(s));
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_addi_i32(addr, addr, 4);
    tmp = load_reg(s, rt2);
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
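/* NB: the labels above give the generated code this shape:
 *       brcond  addr != exclusive_addr  -> fail
 *       brcond  [addr] != exclusive_val -> fail
 *       store; Rd = 0; br done
 *   fail: Rd = 1
 *   done: exclusive_addr = -1
 * so a failing STREX never performs the store, as the architecture
 * requires. */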
static void disas_arm_insn(CPUState *env, DisasContext *s)
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    insn = ldl_code(s->pc);
    /* M variants do not implement ARM mode. */
    /* Unconditional instructions. */
    if (((insn >> 25) & 7) == 1) {
        /* NEON Data processing. */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_data_insn(env, s, insn))
    if ((insn & 0x0f100000) == 0x04000000) {
        /* NEON load/store. */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_ls_insn(env, s, insn))
    if ((insn & 0x0d70f000) == 0x0550f000)
    else if ((insn & 0x0ffffdff) == 0x01010000) {
        if (insn & (1 << 9)) {
            /* BE8 mode not implemented. */
    } else if ((insn & 0x0fffff00) == 0x057ff000) {
        switch ((insn >> 4) & 0xf) {
        /* We don't emulate caches so these are a no-op. */
    } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
        op1 = (insn & 0x1f);
        if (op1 == (env->uncached_cpsr & CPSR_M)) {
            addr = load_reg(s, 13);
        tmp = tcg_const_i32(op1);
        gen_helper_get_r13_banked(addr, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        i = (insn >> 23) & 3;
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = load_reg(s, 14);
        gen_st32(tmp, addr, 0);
        tmp = load_cpu_field(spsr);
        tcg_gen_addi_i32(addr, addr, 4);
        gen_st32(tmp, addr, 0);
        if (insn & (1 << 21)) {
            /* Base writeback. */
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            tcg_gen_addi_i32(addr, addr, offset);
            if (op1 == (env->uncached_cpsr & CPSR_M)) {
                store_reg(s, 13, addr);
            tmp = tcg_const_i32(op1);
            gen_helper_set_r13_banked(cpu_env, tmp, addr);
            tcg_temp_free_i32(tmp);
    } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
        rn = (insn >> 16) & 0xf;
        addr = load_reg(s, rn);
        i = (insn >> 23) & 3;
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        tcg_gen_addi_i32(addr, addr, offset);
        /* Load PC into tmp and CPSR into tmp2. */
        tmp = gen_ld32(addr, 0);
        tcg_gen_addi_i32(addr, addr, 4);
        tmp2 = gen_ld32(addr, 0);
        if (insn & (1 << 21)) {
            /* Base writeback. */
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            tcg_gen_addi_i32(addr, addr, offset);
            store_reg(s, rn, addr);
        gen_rfe(s, tmp, tmp2);
    } else if ((insn & 0x0e000000) == 0x0a000000) {
        /* branch link and change to thumb (blx <offset>) */
        val = (uint32_t)s->pc;
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 14, tmp);
        /* Sign-extend the 24-bit offset */
        offset = (((int32_t)insn) << 8) >> 8;
        /* offset * 4 + bit24 * 2 + (thumb bit) */
        val += (offset << 2) | ((insn >> 23) & 2) | 1;
        /* pipeline offset */
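/* Illustrative helper (hypothetical, not used by the decoder) showing
 * how the BLX(1) target computed above is assembled, assuming pc is the
 * ARM-visible PC (instruction address + 8): the 24-bit signed immediate
 * is scaled by 4, bit 24 (the H bit) adds a halfword offset, and bit 0
 * marks the switch to Thumb state. */
static inline uint32_t blx_imm_target(uint32_t pc, uint32_t insn)
{
    int32_t imm24 = ((int32_t)insn << 8) >> 8;         /* sign-extend insn[23:0] */
    return pc + (imm24 << 2) + ((insn >> 23) & 2) + 1; /* bit 0: Thumb bit */
}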
    } else if ((insn & 0x0e000f00) == 0x0c000100) {
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            /* iWMMXt register transfer. */
            if (env->cp15.c15_cpar & (1 << 1))
                if (!disas_iwmmxt_insn(env, s, insn))
    } else if ((insn & 0x0fe00000) == 0x0c400000) {
        /* Coprocessor double register transfer. */
    } else if ((insn & 0x0f000010) == 0x0e000010) {
        /* Additional coprocessor register transfer. */
    } else if ((insn & 0x0ff10020) == 0x01000000) {
        /* cps (privileged) */
        if (insn & (1 << 19)) {
            if (insn & (1 << 8))
            if (insn & (1 << 7))
            if (insn & (1 << 6))
            if (insn & (1 << 18))
            if (insn & (1 << 17)) {
            val |= (insn & 0x1f);
        gen_set_psr_im(s, mask, 0, val);
    /* if not always execute, we generate a conditional jump to
       the next instruction */
    s->condlabel = gen_new_label();
    gen_test_cc(cond ^ 1, s->condlabel);
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                tcg_gen_movi_i32(tmp, val);
            tmp = load_reg(s, rd);
            tcg_gen_ext16u_i32(tmp, tmp);
            tcg_gen_ori_i32(tmp, tmp, val << 16);
            store_reg(s, rd, tmp);
            if (((insn >> 12) & 0xf) != 0xf)
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            /* CPSR = immediate */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
            i = ((insn & (1 << 22)) != 0);
            if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        case 0x0: /* move program status register */
            tmp = load_reg(s, rm);
            i = ((op1 & 2) != 0);
            if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
            rd = (insn >> 12) & 0xf;
            tmp = load_cpu_field(spsr);
            gen_helper_cpsr_read(tmp);
            store_reg(s, rd, tmp);
            /* branch/exchange thumb (bx). */
            tmp = load_reg(s, rm);
            } else if (op1 == 3) {
            rd = (insn >> 12) & 0xf;
            tmp = load_reg(s, rm);
            gen_helper_clz(tmp, tmp);
            store_reg(s, rd, tmp);
            /* Trivial implementation equivalent to bx. */
            tmp = load_reg(s, rm);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            gen_helper_double_saturate(tmp2, tmp2);
            gen_helper_sub_saturate(tmp, tmp, tmp2);
            gen_helper_add_saturate(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 4);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
        case 0x8: /* signed multiply */
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            /* (32 * 16) >> 16 */
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            tcg_gen_sari_i32(tmp2, tmp2, 16);
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
            tcg_gen_shri_i64(tmp64, tmp64, 16);
            tcg_gen_trunc_i64_i32(tmp, tmp64);
            tcg_temp_free_i64(tmp64);
            if ((sh & 2) == 0) {
                tmp2 = load_reg(s, rn);
                gen_helper_add_setq(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
            tmp64 = tcg_temp_new_i64();
            tcg_gen_ext_i32_i64(tmp64, tmp);
            gen_addq(s, tmp64, rn, rd);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
            tmp2 = load_reg(s, rn);
            gen_helper_add_setq(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;
        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;
        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        rd = (insn >> 12) & 0xf;
            tcg_gen_and_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return. */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                store_reg_bx(env, s, rd, tmp);
            gen_helper_sub_cc(tmp, tmp2, tmp);
            tcg_gen_sub_i32(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_add_cc(tmp, tmp, tmp2);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_adc_cc(tmp, tmp, tmp2);
            gen_add_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_sbc_cc(tmp, tmp, tmp2);
            gen_sub_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_sbc_cc(tmp, tmp2, tmp);
            gen_sub_carry(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_helper_add_cc(tmp, tmp, tmp2);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return. */
                gen_exception_return(s, tmp2);
                store_reg_bx(env, s, rd, tmp2);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_not_i32(tmp2, tmp2);
            store_reg_bx(env, s, rd, tmp2);
        if (op1 != 0x0f && op1 != 0x0d) {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        /* multiplies, extra load/stores */
        sh = (insn >> 5) & 3;
        rd = (insn >> 16) & 0xf;
        rn = (insn >> 12) & 0xf;
        rs = (insn >> 8) & 0xf;
        op1 = (insn >> 20) & 0xf;
        case 0: case 1: case 2: case 3: case 6:
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (insn & (1 << 22)) {
                /* Subtract (mls) */
                tmp2 = load_reg(s, rn);
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            } else if (insn & (1 << 21)) {
                tmp2 = load_reg(s, rn);
                tcg_gen_add_i32(tmp, tmp, tmp2);
            if (insn & (1 << 20))
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            if (insn & (1 << 22))
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tmp64 = gen_mulu_i64_i32(tmp, tmp2);
            if (insn & (1 << 21)) /* mult accumulate */
                gen_addq(s, tmp64, rn, rd);
            if (!(insn & (1 << 23))) { /* double accumulate */
                gen_addq_lo(s, tmp64, rn);
                gen_addq_lo(s, tmp64, rd);
            if (insn & (1 << 20))
                gen_logicq_cc(tmp64);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (insn & (1 << 23)) {
                /* load/store exclusive */
                op1 = (insn >> 21) & 0x3;
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rd, 15, addr, 2);
                    case 1: /* ldrexd */
                        gen_load_exclusive(s, rd, rd + 1, addr, 3);
                    case 2: /* ldrexb */
                        gen_load_exclusive(s, rd, 15, addr, 0);
                    case 3: /* ldrexh */
                        gen_load_exclusive(s, rd, 15, addr, 1);
                    gen_store_exclusive(s, rd, rm, 15, addr, 2);
                    case 1: /* strexd */
                        gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                    case 2: /* strexb */
                        gen_store_exclusive(s, rd, rm, 15, addr, 0);
                    case 3: /* strexh */
                        gen_store_exclusive(s, rd, rm, 15, addr, 1);
                tcg_temp_free(addr);
            /* SWP instruction */
                /* ??? This is not really atomic. However we know
                   we never have multiple CPUs running in parallel,
                   so it is good enough. */
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                if (insn & (1 << 22)) {
                    tmp2 = gen_ld8u(addr, IS_USER(s));
                    gen_st8(tmp, addr, IS_USER(s));
                    tmp2 = gen_ld32(addr, IS_USER(s));
                    gen_st32(tmp, addr, IS_USER(s));
                store_reg(s, rd, tmp2);
            /* Misc load/store */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            addr = load_reg(s, rn);
            if (insn & (1 << 24))
                gen_add_datah_offset(s, insn, 0, addr);
            if (insn & (1 << 20)) {
                tmp = gen_ld16u(addr, IS_USER(s));
                tmp = gen_ld8s(addr, IS_USER(s));
                tmp = gen_ld16s(addr, IS_USER(s));
            } else if (sh & 2) {
                tmp = load_reg(s, rd);
                gen_st32(tmp, addr, IS_USER(s));
                tcg_gen_addi_i32(addr, addr, 4);
                tmp = load_reg(s, rd + 1);
                gen_st32(tmp, addr, IS_USER(s));
                tmp = gen_ld32(addr, IS_USER(s));
                store_reg(s, rd, tmp);
                tcg_gen_addi_i32(addr, addr, 4);
                tmp = gen_ld32(addr, IS_USER(s));
                address_offset = -4;
                tmp = load_reg(s, rd);
                gen_st16(tmp, addr, IS_USER(s));
            /* Perform base writeback before the loaded value to
               ensure correct behavior with overlapping index registers.
               ldrd with base writeback is undefined if the
               destination and index registers overlap. */
            if (!(insn & (1 << 24))) {
                gen_add_datah_offset(s, insn, address_offset, addr);
                store_reg(s, rn, addr);
            } else if (insn & (1 << 21)) {
                tcg_gen_addi_i32(addr, addr, address_offset);
                store_reg(s, rn, addr);
            /* Complete the load. */
            store_reg(s, rd, tmp);
        if (insn & (1 << 4)) {
            /* ARMv6 Media instructions. */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            rs = (insn >> 8) & 0xf;
            switch ((insn >> 23) & 3) {
            case 0: /* Parallel add/subtract. */
                op1 = (insn >> 20) & 7;
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                sh = (insn >> 5) & 7;
                if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                store_reg(s, rd, tmp);
                if ((insn & 0x00700020) == 0) {
                    /* Halfword pack. */
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp2, tmp2, shift);
                        tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_shli_i32(tmp2, tmp2, shift);
                        tcg_gen_ext16u_i32(tmp, tmp);
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00200020) == 0x00200000) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp, tmp, shift);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat(tmp, tmp, tmp2);
                        gen_helper_ssat(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00300fe0) == 0x00200f20) {
                    tmp = load_reg(s, rm);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat16(tmp, tmp, tmp2);
                        gen_helper_ssat16(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x000003e0) == 0x00000060) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 10) & 3;
                    /* ??? In many cases it's not necessary to do a
                       rotate, a shift is sufficient. */
                    tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                    op1 = (insn >> 20) & 7;
                    case 0: gen_sxtb16(tmp); break;
                    case 2: gen_sxtb(tmp); break;
                    case 3: gen_sxth(tmp); break;
                    case 4: gen_uxtb16(tmp); break;
                    case 6: gen_uxtb(tmp); break;
                    case 7: gen_uxth(tmp); break;
                    default: goto illegal_op;
                    tmp2 = load_reg(s, rn);
                    if ((op1 & 3) == 0) {
                        gen_add16(tmp, tmp2);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                    tmp = load_reg(s, rm);
                    if (insn & (1 << 22)) {
                        if (insn & (1 << 7)) {
                        gen_helper_rbit(tmp, tmp);
                        if (insn & (1 << 7))
                        tcg_gen_bswap32_i32(tmp, tmp);
                    store_reg(s, rd, tmp);
            case 2: /* Multiplies (Type 3). */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (insn & (1 << 20)) {
                    /* Signed multiply most significant [accumulate]. */
                    tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    if (insn & (1 << 5))
                        tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                    tcg_gen_shri_i64(tmp64, tmp64, 32);
                    tcg_gen_trunc_i64_i32(tmp, tmp64);
                    tcg_temp_free_i64(tmp64);
                    tmp2 = load_reg(s, rd);
                    if (insn & (1 << 6)) {
                        tcg_gen_sub_i32(tmp, tmp, tmp2);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    store_reg(s, rn, tmp);
                    if (insn & (1 << 5))
                        gen_swap_half(tmp2);
                    gen_smul_dual(tmp, tmp2);
                    /* This addition cannot overflow. */
                    if (insn & (1 << 6)) {
                        tcg_gen_sub_i32(tmp, tmp, tmp2);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    if (insn & (1 << 22)) {
                        /* smlald, smlsld */
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        gen_addq(s, tmp64, rd, rn);
                        gen_storeq_reg(s, rd, rn, tmp64);
                        tcg_temp_free_i64(tmp64);
                        /* smuad, smusd, smlad, smlsd */
                        tmp2 = load_reg(s, rd);
                        gen_helper_add_setq(tmp, tmp, tmp2);
                        store_reg(s, rn, tmp);
                op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                case 0: /* Unsigned sum of absolute differences. */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    gen_helper_usad8(tmp, tmp, tmp2);
                    tmp2 = load_reg(s, rd);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    store_reg(s, rn, tmp);
                case 0x20: case 0x24: case 0x28: case 0x2c:
                    /* Bitfield insert/clear. */
                    shift = (insn >> 7) & 0x1f;
                    i = (insn >> 16) & 0x1f;
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rd);
                    gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                    store_reg(s, rd, tmp);
                case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    i = ((insn >> 16) & 0x1f) + 1;
                    gen_ubfx(tmp, shift, (1u << i) - 1);
                    gen_sbfx(tmp, shift, i);
                    store_reg(s, rd, tmp);
        /* Check for undefined extension instructions
         * per the ARM Bible IE:
         * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
         */
        sh = (0xf << 20) | (0xf << 4);
        if (op1 == 0x7 && ((insn & sh) == sh))
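        /* NB: op1 here is insn[27:24] and sh masks insn[23:20] and
         * insn[7:4], so the test above rejects exactly the pattern
         * quoted in the comment: insn[27:20] == 0111 1111 with
         * insn[7:4] == 1111. */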
        /* load/store byte/word */
        rn = (insn >> 16) & 0xf;
        rd = (insn >> 12) & 0xf;
        tmp2 = load_reg(s, rn);
        i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
        if (insn & (1 << 24))
            gen_add_data_offset(s, insn, tmp2);
        if (insn & (1 << 20)) {
            if (insn & (1 << 22)) {
                tmp = gen_ld8u(tmp2, i);
                tmp = gen_ld32(tmp2, i);
            tmp = load_reg(s, rd);
            if (insn & (1 << 22))
                gen_st8(tmp, tmp2, i);
                gen_st32(tmp, tmp2, i);
        if (!(insn & (1 << 24))) {
            gen_add_data_offset(s, insn, tmp2);
            store_reg(s, rn, tmp2);
        } else if (insn & (1 << 21)) {
            store_reg(s, rn, tmp2);
        if (insn & (1 << 20)) {
            /* Complete the load. */
            store_reg(s, rd, tmp);
        int j, n, user, loaded_base;
        /* load/store multiple words */
        /* XXX: store correct base if write back */
        if (insn & (1 << 22)) {
            goto illegal_op; /* only usable in supervisor mode */
        if ((insn & (1 << 15)) == 0)
        rn = (insn >> 16) & 0xf;
        addr = load_reg(s, rn);
        /* compute total size */
        TCGV_UNUSED(loaded_var);
        if (insn & (1 << i))
        /* XXX: test invalid n == 0 case ? */
        if (insn & (1 << 23)) {
            if (insn & (1 << 24)) {
                tcg_gen_addi_i32(addr, addr, 4);
                /* post increment */
            if (insn & (1 << 24)) {
                tcg_gen_addi_i32(addr, addr, -(n * 4));
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
        if (insn & (1 << i)) {
            if (insn & (1 << 20)) {
                tmp = gen_ld32(addr, IS_USER(s));
                tmp2 = tcg_const_i32(i);
                gen_helper_set_user_reg(tmp2, tmp);
                tcg_temp_free_i32(tmp2);
            } else if (i == rn) {
                store_reg(s, i, tmp);
                /* special case: r15 = PC + 8 */
                val = (long)s->pc + 4;
                tcg_gen_movi_i32(tmp, val);
                tmp2 = tcg_const_i32(i);
                gen_helper_get_user_reg(tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp = load_reg(s, i);
                gen_st32(tmp, addr, IS_USER(s));
            /* no need to add after the last transfer */
            tcg_gen_addi_i32(addr, addr, 4);
        if (insn & (1 << 21)) {
            if (insn & (1 << 23)) {
                if (insn & (1 << 24)) {
                    /* post increment */
                    tcg_gen_addi_i32(addr, addr, 4);
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    /* post decrement */
                    tcg_gen_addi_i32(addr, addr, -(n * 4));
            store_reg(s, rn, addr);
            store_reg(s, rn, loaded_var);
        if ((insn & (1 << 22)) && !user) {
            /* Restore CPSR from SPSR. */
            tmp = load_cpu_field(spsr);
            gen_set_cpsr(tmp, 0xffffffff);
            s->is_jmp = DISAS_UPDATE;
        /* branch (and link) */
        val = (int32_t)s->pc;
        if (insn & (1 << 24)) {
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
        offset = (((int32_t)insn << 8) >> 8);
        val += (offset << 2) + 4;
        if (disas_coproc_insn(env, s, insn))
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_SWI;
        gen_set_condexec(s);
        gen_set_pc_im(s->pc - 4);
        gen_exception(EXCP_UDEF);
        s->is_jmp = DISAS_JUMP;
/* Return true if this is a Thumb-2 logical op. */
thumb2_logic_op(int op)

/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv t0, TCGv t1)
    tcg_gen_and_i32(t0, t0, t1);
    tcg_gen_andc_i32(t0, t0, t1);
    tcg_gen_or_i32(t0, t0, t1);
    tcg_gen_not_i32(t1, t1);
    tcg_gen_or_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, t1);
    gen_helper_add_cc(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, t1);
    gen_helper_adc_cc(t0, t0, t1);
    gen_helper_sbc_cc(t0, t0, t1);
    gen_sub_carry(t0, t0, t1);
    gen_helper_sub_cc(t0, t0, t1);
    tcg_gen_sub_i32(t0, t0, t1);
    gen_helper_sub_cc(t0, t1, t0);
    tcg_gen_sub_i32(t0, t1, t0);
    default: /* 5, 6, 7, 9, 12, 15. */
    gen_set_CF_bit31(t1);
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
   is not legal. */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature(env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior. */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx. */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if (insn & (1 << 11)) {
            /* Second half of bl. */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary. Implement it as two
               16-bit instructions in case the second half causes a
               prefetch abort. */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
        /* Fall through to 32-bit decode. */
    insn = lduw_code(s->pc);
    insn |= (uint32_t)insn_hw1 << 16;
    if ((insn & 0xf800e800) != 0xf000e800) {
    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions. Should never happen. */
        if (insn & (1 << 22)) {
            /* Other load/store, table branch. */
            if (insn & 0x01200000) {
                /* Load/store doubleword. */
                tcg_gen_movi_i32(addr, s->pc & ~3);
                addr = load_reg(s, rn);
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                if (insn & (1 << 21)) {
                    /* Base writeback. */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word. */
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                tcg_temp_free(addr);
            } else if ((insn & (1 << 6)) == 0) {
                tcg_gen_movi_i32(addr, s->pc);
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    tcg_gen_add_i32(addr, addr, tmp);
                    tmp = gen_ld16u(addr, IS_USER(s));
                    tmp = gen_ld8u(addr, IS_USER(s));
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
                /* Load/store exclusive byte/halfword/doubleword. */
                op = (insn >> 4) & 0x3;
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                tcg_temp_free(addr);
            /* Load/store multiple, RFE, SRS. */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode. */
                if (insn & (1 << 20)) {
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2. */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback. */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                            tcg_gen_addi_i32(addr, addr, -4);
                        store_reg(s, rn, addr);
                    gen_rfe(s, tmp, tmp2);
                    if (op == (env->uncached_cpsr & CPSR_M)) {
                        addr = load_reg(s, 13);
                        tmp = tcg_const_i32(op);
                        gen_helper_get_r13_banked(addr, cpu_env, tmp);
                        tcg_temp_free_i32(tmp);
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                            tcg_gen_addi_i32(addr, addr, 4);
                        if (op == (env->uncached_cpsr & CPSR_M)) {
                            store_reg(s, 13, addr);
                            tmp = tcg_const_i32(op);
                            gen_helper_set_r13_banked(cpu_env, tmp, addr);
                            tcg_temp_free_i32(tmp);
                /* Load/store multiple. */
                addr = load_reg(s, rn);
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                    if (insn & (1 << 20)) {
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                if (insn & (1 << 21)) {
                    /* Base register writeback. */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    /* Fault if writeback register is in register list. */
                    if (insn & (1 << rn))
                    store_reg(s, rn, addr);
    case 5: /* Data processing register constant shift. */
        tcg_gen_movi_i32(tmp, 0);
        tmp = load_reg(s, rn);
        tmp2 = load_reg(s, rm);
        op = (insn >> 21) & 0xf;
        shiftop = (insn >> 4) & 3;
        shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
        conds = (insn & (1 << 20)) != 0;
        logic_cc = (conds && thumb2_logic_op(op));
        gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
        if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
        store_reg(s, rd, tmp);
    case 13: /* Misc data processing. */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
        case 0: /* Register controlled shift. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            store_reg_bx(env, s, rd, tmp);
        case 1: /* Sign/zero extend. */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient. */
            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            case 0: gen_sxth(tmp); break;
            case 1: gen_uxth(tmp); break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp); break;
            case 5: gen_uxtb(tmp); break;
            default: goto illegal_op;
            tmp2 = load_reg(s, rn);
            if ((op >> 1) == 1) {
                gen_add16(tmp, tmp2);
                tcg_gen_add_i32(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
        case 2: /* SIMD add/subtract. */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            store_reg(s, rd, tmp);
        case 3: /* Other data processing. */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            /* Saturating add/subtract. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_helper_double_saturate(tmp, tmp);
            gen_helper_sub_saturate(tmp, tmp2, tmp);
            gen_helper_add_saturate(tmp, tmp, tmp2);
            tmp = load_reg(s, rn);
            case 0x0a: /* rbit */
                gen_helper_rbit(tmp, tmp);
            case 0x08: /* rev */
                tcg_gen_bswap32_i32(tmp, tmp);
            case 0x09: /* rev16 */
            case 0x0b: /* revsh */
            case 0x10: /* sel */
                tmp2 = load_reg(s, rm);
                tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
            case 0x18: /* clz */
                gen_helper_clz(tmp, tmp);
            store_reg(s, rd, tmp);
        case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tmp2 = load_reg(s, rs);
                tcg_gen_sub_i32(tmp, tmp2, tmp);
                tcg_gen_add_i32(tmp, tmp, tmp2);
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tmp2 = load_reg(s, rs);
                gen_helper_add_setq(tmp, tmp, tmp2);
            case 2: /* Dual multiply add. */
            case 4: /* Dual multiply subtract. */
                gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                /* This addition cannot overflow. */
                if (insn & (1 << 22)) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                tmp2 = load_reg(s, rs);
                gen_helper_add_setq(tmp, tmp, tmp2);
            case 3: /* 32 * 16 -> 32msb */
                tcg_gen_sari_i32(tmp2, tmp2, 16);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                tmp2 = load_reg(s, rs);
                gen_helper_add_setq(tmp, tmp, tmp2);
            case 5: case 6: /* 32 * 32 -> 32msb */
                gen_imull(tmp, tmp2);
                if (insn & (1 << 5)) {
                    gen_roundqd(tmp, tmp2);
                tmp2 = load_reg(s, rs);
                if (insn & (1 << 21)) {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_gen_sub_i32(tmp, tmp2, tmp);
            case 7: /* Unsigned sum of absolute differences. */
                gen_helper_usad8(tmp, tmp, tmp2);
                tmp2 = load_reg(s, rs);
                tcg_gen_add_i32(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
        case 6: case 7: /* 64-bit multiply, Divide. */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                if (!arm_feature(env, ARM_FEATURE_DIV))
                gen_helper_udiv(tmp, tmp, tmp2);
                gen_helper_sdiv(tmp, tmp, tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long. */
                gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
                /* Unsigned 64-bit multiply */
                tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                /* Signed 64-bit multiply */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                gen_addq_lo(s, tmp64, rs);
                gen_addq_lo(s, tmp64, rd);
            } else if (op & 0x40) {
                /* 64-bit accumulate. */
                gen_addq(s, tmp64, rs, rd);
            gen_storeq_reg(s, rs, rd, tmp64);
            tcg_temp_free_i64(tmp64);
    case 6: case 7: case 14: case 15:
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding. */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
            if (disas_neon_data_insn(env, s, insn))
        if (insn & (1 << 28))
        if (disas_coproc_insn(env, s, insn))
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control. */
            if (insn & 0x5000) {
                /* Unconditional branch. */
                /* signextend(hw1[10:0]) -> offset[:12]. */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1]. */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above. */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
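                /* NB: this assembles the T32 B.W/BL offset
                 * S:I1:I2:imm10:imm11:0 where I1 = ~(J1 ^ S) and
                 * I2 = ~(J2 ^ S). The arithmetic sign extension of
                 * hw1[10:0] already filled offset[24:12] with copies of
                 * S, so XORing with the inverted J bits (hw2[13] and
                 * hw2[11]) lands I1 in offset[23] and I2 in offset[22]. */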
                if (insn & (1 << 14)) {
                    /* Branch and link. */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                if (insn & (1 << 12)) {
                    offset &= ~(uint32_t)2;
                    gen_bx_im(s, offset);
            } else if (((insn >> 23) & 7) == 7) {
                if (insn & (1 << 13))
                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented. */
                op = (insn >> 20) & 7;
                case 0: /* msr cpsr. */
                    tmp = load_reg(s, rn);
                    addr = tcg_const_i32(insn & 0xff);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                case 1: /* msr spsr. */
                    tmp = load_reg(s, rn);
                    msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                case 2: /* cps, nop-hint. */
                    if (((insn >> 8) & 7) == 0) {
                        gen_nop_hint(s, insn & 0xff);
                    /* Implemented as NOP in user mode. */
                    if (insn & (1 << 10)) {
                        if (insn & (1 << 7))
                        if (insn & (1 << 6))
                        if (insn & (1 << 5))
                        if (insn & (1 << 9))
                            imm = CPSR_A | CPSR_I | CPSR_F;
                    if (insn & (1 << 8)) {
                        imm |= (insn & 0x1f);
                    gen_set_psr_im(s, offset, 0, imm);
                case 3: /* Special control operations. */
                    op = (insn >> 4) & 0xf;
                    /* These execute as NOPs. */
                    /* Trivial implementation equivalent to bx. */
                    tmp = load_reg(s, rn);
                case 5: /* Exception return. */
                    if (rn != 14 || rd != 15) {
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                case 6: /* mrs cpsr. */
                    addr = tcg_const_i32(insn & 0xff);
                    gen_helper_v7m_mrs(tmp, cpu_env, addr);
                    tcg_temp_free_i32(addr);
                    gen_helper_cpsr_read(tmp);
                    store_reg(s, rd, tmp);
                case 7: /* mrs spsr. */
                    /* Not accessible in user mode. */
                    if (IS_USER(s) || IS_M(env))
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                /* Conditional branch. */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction. */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16]. */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26]. */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13]. */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11]. */
                offset |= (insn & (1 << 11)) << 8;
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            /* Data processing immediate. */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                    /* Bitfield/Saturate. */
                    op = (insn >> 21) & 7;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rn);
                    case 2: /* Signed bitfield extract. */
                        if (shift + imm > 32)
                        gen_sbfx(tmp, shift, imm);
                    case 6: /* Unsigned bitfield extract. */
                        if (shift + imm > 32)
                        gen_ubfx(tmp, shift, (1u << imm) - 1);
                    case 3: /* Bitfield insert/clear. */
                        imm = imm + 1 - shift;
                        tmp2 = load_reg(s, rd);
                        gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                    default: /* Saturate. */
                        tcg_gen_sari_i32(tmp, tmp, shift);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                        tmp2 = tcg_const_i32(imm);
                        if ((op & 1) && shift == 0)
                            gen_helper_usat16(tmp, tmp, tmp2);
                            gen_helper_usat(tmp, tmp, tmp2);
                        if ((op & 1) && shift == 0)
                            gen_helper_ssat16(tmp, tmp, tmp2);
                            gen_helper_ssat(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate. */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                            tcg_gen_movi_i32(tmp, imm);
                        /* Add/sub 12-bit immediate. */
                        offset = s->pc & ~(uint32_t)3;
                        if (insn & (1 << 23))
                        tcg_gen_movi_i32(tmp, offset);
                        tmp = load_reg(s, rn);
                        if (insn & (1 << 23))
                            tcg_gen_subi_i32(tmp, tmp, imm);
                            tcg_gen_addi_i32(tmp, tmp, imm);
                    store_reg(s, rd, tmp);
                int shifter_out = 0;
                /* modified 12-bit immediate. */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                /* Nothing to do. */
                case 1: /* 00XY00XY */
                case 2: /* XY00XY00 */
                case 3: /* XYXYXYXY */
                default: /* Rotated constant. */
                    shift = (shift << 1) | (imm >> 7);
                    imm = imm << (32 - shift);
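/* Illustrative expansion of the T32 "modified immediate" decoded
 * piecewise above (hypothetical helper, not used by the decoder). ctrl
 * is the 4-bit control field; for the rotated form the 5-bit count is
 * rebuilt exactly as above, and because the 8-bit value has its top bit
 * forced and the count is at least 8, imm << (32 - rot) equals a plain
 * rotate right. */
static inline uint32_t t32_expand_imm(uint32_t ctrl, uint32_t imm8)
{
    uint32_t rot;
    switch (ctrl) {
    case 0: return imm8;                          /* 000000XY */
    case 1: return imm8 | (imm8 << 16);           /* 00XY00XY */
    case 2: return (imm8 << 8) | (imm8 << 24);    /* XY00XY00 */
    case 3: return imm8 * 0x01010101u;            /* XYXYXYXY */
    default:                                      /* rotated constant */
        rot = (ctrl << 1) | (imm8 >> 7);          /* 5-bit count, 8..31 */
        imm8 |= 0x80;                             /* implicit bit 7 */
        return imm8 << (32 - rot);                /* == ror(imm8, rot) */
    }
}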
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                tcg_gen_movi_i32(tmp, 0);
                tmp = load_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                rd = (insn >> 8) & 0xf;
                store_reg(s, rd, tmp);
    case 12: /* Load/store single data item. */
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
        /* s->pc has already been incremented by 4. */
        imm = s->pc & 0xfffffffc;
        if (insn & (1 << 23))
            imm += insn & 0xfff;
            imm -= insn & 0xfff;
        tcg_gen_movi_i32(addr, imm);
        addr = load_reg(s, rn);
        if (insn & (1 << 23)) {
            /* Positive offset. */
            tcg_gen_addi_i32(addr, addr, imm);
        op = (insn >> 8) & 7;
        case 0: case 8: /* Shifted Register. */
            shift = (insn >> 4) & 0xf;
            tmp = load_reg(s, rm);
            tcg_gen_shli_i32(tmp, tmp, shift);
            tcg_gen_add_i32(addr, addr, tmp);
        case 4: /* Negative offset. */
            tcg_gen_addi_i32(addr, addr, -imm);
        case 6: /* User privilege. */
            tcg_gen_addi_i32(addr, addr, imm);
        case 1: /* Post-decrement. */
        case 3: /* Post-increment. */
        case 5: /* Pre-decrement. */
        case 7: /* Pre-increment. */
            tcg_gen_addi_i32(addr, addr, imm);
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (insn & (1 << 20)) {
            if (rs == 15 && op != 2) {
                /* Memory hint. Implemented as NOP. */
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default: goto illegal_op;
            store_reg(s, rs, tmp);
            tmp = load_reg(s, rs);
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default: goto illegal_op;
            tcg_gen_addi_i32(addr, addr, imm);
            store_reg(s, rn, addr);
static void disas_thumb_insn(CPUState *env, DisasContext *s)
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
    insn = lduw_code(s->pc);
    switch (insn >> 12) {
        op = (insn >> 11) & 3;
        rn = (insn >> 3) & 7;
        tmp = load_reg(s, rn);
        if (insn & (1 << 10)) {
            tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            rm = (insn >> 6) & 7;
            tmp2 = load_reg(s, rm);
        if (insn & (1 << 9)) {
            if (s->condexec_mask)
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                gen_helper_sub_cc(tmp, tmp, tmp2);
            if (s->condexec_mask)
                tcg_gen_add_i32(tmp, tmp, tmp2);
                gen_helper_add_cc(tmp, tmp, tmp2);
        store_reg(s, rd, tmp);
        /* shift immediate */
        rm = (insn >> 3) & 7;
        shift = (insn >> 6) & 0x1f;
        tmp = load_reg(s, rm);
        gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
        if (!s->condexec_mask)
        store_reg(s, rd, tmp);
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            gen_helper_sub_cc(tmp, tmp, tmp2);
            if (s->condexec_mask)
                tcg_gen_add_i32(tmp, tmp, tmp2);
                gen_helper_add_cc(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
            if (s->condexec_mask)
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                gen_helper_sub_cc(tmp, tmp, tmp2);
            store_reg(s, rd, tmp);
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored. */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                store_reg(s, rd, tmp);
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
        /* data processing register */
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
        if (op == 9) { /* neg */
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        tmp2 = load_reg(s, rm);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
                gen_helper_shl_cc(tmp2, tmp2, tmp);
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
                gen_helper_shr_cc(tmp2, tmp2, tmp);
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
                gen_helper_sar_cc(tmp2, tmp2, tmp);
            if (s->condexec_mask)
                gen_helper_adc_cc(tmp, tmp, tmp2);
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
                gen_helper_ror_cc(tmp2, tmp2, tmp);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
                gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_helper_add_cc(tmp, tmp, tmp2);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
            store_reg(s, rm, tmp2);
            store_reg(s, rd, tmp);
        /* load/store register offset. */
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        if (op < 3) /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
            gen_st16(tmp, addr, IS_USER(s));
            gen_st8(tmp, addr, IS_USER(s));
            tmp = gen_ld8s(addr, IS_USER(s));
            tmp = gen_ld32(addr, IS_USER(s));
            tmp = gen_ld16u(addr, IS_USER(s));
            tmp = gen_ld8u(addr, IS_USER(s));
            tmp = gen_ld16s(addr, IS_USER(s));
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;
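        /* The word form keeps its 5-bit immediate in bits 10:6, scaled
           by 4; "(insn >> 4) & 0x7c" does the extract and scale in a
           single shift.  Illustrative decode: imm5 = 3 gives a byte
           offset of 12.  */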
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;
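            /* In the push/pop case above, offset is sized up front so
               the stack adjustment happens once (pre-decrement for
               push).  Pop with PC in the list ((insn & 0x0900) ==
               0x0900) defers the gen_bx until r13 has been written
               back.  */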
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
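            /* CBZ/CBNZ offset: bits 7:3 give offset<5:1> and bit 9
               gives offset<6>, which is what the two masked shifts
               above assemble.  Illustrative decode (not from the
               source): 0xb108 is "cbz r0, <target>" with offset = 2,
               i.e. a branch to the instruction's address + 4 + 2.  */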
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
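            /* Only the top three condition bits are stored here
               ("& 0xe"); the low bit is recomputed after every executed
               instruction from bit 4 of condexec_mask (see the IT-state
               advance in gen_intermediate_code_internal below).  */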
        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;
        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;
        default:
            goto undef;
        }
        break;
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;
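        /* Writeback above is skipped when rn appears in the transfer
           list, so a value loaded into the base register takes
           precedence over the incremented address, which is simply
           released.  */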
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
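        /* "cond ^ 1" flips the condition's sense (ARM condition codes
           pair up in adjacent encodings, e.g. EQ/NE), so the brcond
           generated above skips the jump whenever the condition fails.
           The "(insn << 24) >> 24" idiom sign-extends the 8-bit
           immediate: e.g. 0xfe becomes -2.  */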
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
    return;
illegal_op:
undef:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
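        /* IT-state advance for the Thumb path above: condexec_mask
           shifts left one bit per executed instruction and bit 4 of the
           old mask becomes the condition's low bit, so e.g. the "E"
           slots of an ITTE block execute with the tested condition
           inverted.  */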
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}
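/* The gen_opc_pc[] entry consumed here was recorded by the search_pc
   pass of gen_intermediate_code_internal, so restoring r15 after a
   fault is a simple table lookup.  */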