 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
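/*
 * Illustrative note (a hedged sketch, not quoted from the decoder below):
 * a case for an ARMv6T2-only encoding would typically begin with
 *     ARCH(6T2);
 * so that cores without Thumb-2 support fall through to the shared
 * "illegal_op" undefined-instruction path.
 */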
/* internal defines */
typedef struct DisasContext {
    /* Nonzero if this instruction has been conditionally skipped. */
    /* The label that will be jumped to when the instruction is skipped. */
    /* Thumb-2 conditional execution bits. */
    struct TranslationBlock *tb;
    int singlestep_enabled;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"
/* initialize TCG globals. */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
}
/* The code generator doesn't like lots of temporaries, so maintain our own
   cache for reuse within a function. */
static int num_temps;
static TCGv temps[MAX_TEMPS];

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}
/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot. */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
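/*
 * Typical usage sketch of the temporary cache (illustrative only):
 *
 *     TCGv tmp = new_tmp();           // grab a cached i32 temporary
 *     tcg_gen_movi_i32(tmp, 0);       // ... use it ...
 *     dead_tmp(tmp);                  // return it to the cache
 *
 * Helpers such as store_reg() and store_cpu_field() release their TCGv
 * argument themselves, so that argument must be a temporary.
 */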
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
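/*
 * Illustrative register-to-register pattern built on the helpers above
 * (a hedged sketch, not a specific instruction from this file):
 *
 *     TCGv tmp = load_reg(s, rn);     // fetch Rn into a fresh temporary
 *     tcg_gen_addi_i32(tmp, tmp, 4);  // operate on it
 *     store_reg(s, rd, tmp);          // write Rd; tmp is marked dead
 */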
/* Basic operations. */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
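/*
 * Worked example for gen_rev16(): var = 0xAABBCCDD
 *   (var >> 8) & 0x00ff00ff = 0x00AA00CC
 *   (var << 8) & 0xff00ff00 = 0xBB00DD00
 *   OR                      = 0xBBAADDCC, i.e. each halfword byteswapped.
 */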
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
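/*
 * Worked example of the xor/subtract sign extension used above, for a
 * 4-bit field whose raw value is 0xF (i.e. -1): masking gives 0xF, xor
 * with the sign bit 0x8 gives 0x7, and subtracting 0x8 gives -1.
 * A positive value such as 0x5 is left unchanged: (0x5 ^ 0x8) - 0x8 = 0x5.
 */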
/* Bitfield insertion. Insert val into base. Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Round the top 32 bits of a 64-bit value. */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
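/*
 * In effect gen_roundqd() computes b + bit31(a): adding the top bit of the
 * low word rounds the 64-bit value b:a to the nearest 32-bit result rather
 * than truncating.  E.g. high word 1 with low word 0x80000000 rounds to 2.
 */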
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}
/* Unsigned 32x32->64 multiply. */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}
/* Signed 32x32->64 multiply. */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
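/*
 * The masking above implements two independent 16-bit adds with one 32-bit
 * add: clearing bit 15 of both operands guarantees the low halfword cannot
 * carry into the high halfword, and the final xor with (t0 ^ t1) & 0x8000
 * restores the correct value of bit 15.
 */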
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* T0 &= ~T1. Clobbers T1. */
/* FIXME: Implement bic natively. */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}
/* FIXME: Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME: Implement this natively. */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
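/*
 * Worked example for tcg_gen_rori_i32(): rotating 0x80000001 right by 1
 * gives (0x80000001 >> 1) | (0x80000001 << 31) = 0x40000000 | 0x80000000
 * = 0xC0000000.
 */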
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
580 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
587 tmp
= tcg_temp_new_ptr();
588 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
592 tmp
= tcg_temp_new_ptr();
593 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
610 #undef gen_pas_helper
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
625 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
630 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 tmp
= tcg_temp_new_ptr();
633 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
637 tmp
= tcg_temp_new_ptr();
638 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
641 #undef gen_pas_helper
642 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
655 #undef gen_pas_helper
660 static void gen_test_cc(int cc
, int label
)
668 tmp
= load_cpu_field(ZF
);
669 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
672 tmp
= load_cpu_field(ZF
);
673 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
676 tmp
= load_cpu_field(CF
);
677 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
680 tmp
= load_cpu_field(CF
);
681 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
684 tmp
= load_cpu_field(NF
);
685 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
688 tmp
= load_cpu_field(NF
);
689 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
692 tmp
= load_cpu_field(VF
);
693 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
696 tmp
= load_cpu_field(VF
);
697 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
699 case 8: /* hi: C && !Z */
700 inv
= gen_new_label();
701 tmp
= load_cpu_field(CF
);
702 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
704 tmp
= load_cpu_field(ZF
);
705 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
708 case 9: /* ls: !C || Z */
709 tmp
= load_cpu_field(CF
);
710 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
712 tmp
= load_cpu_field(ZF
);
713 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
715 case 10: /* ge: N == V -> N ^ V == 0 */
716 tmp
= load_cpu_field(VF
);
717 tmp2
= load_cpu_field(NF
);
718 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
720 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
722 case 11: /* lt: N != V -> N ^ V != 0 */
723 tmp
= load_cpu_field(VF
);
724 tmp2
= load_cpu_field(NF
);
725 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
727 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
729 case 12: /* gt: !Z && N == V */
730 inv
= gen_new_label();
731 tmp
= load_cpu_field(ZF
);
732 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
734 tmp
= load_cpu_field(VF
);
735 tmp2
= load_cpu_field(NF
);
736 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
738 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
741 case 13: /* le: Z || N != V */
742 tmp
= load_cpu_field(ZF
);
743 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
745 tmp
= load_cpu_field(VF
);
746 tmp2
= load_cpu_field(NF
);
747 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
749 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
752 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
758 static const uint8_t table_logic_cc
[16] = {
/* Set PC and Thumb state from an immediate address. */
778 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
782 s
->is_jmp
= DISAS_UPDATE
;
784 if (s
->thumb
!= (addr
& 1)) {
785 tcg_gen_movi_i32(tmp
, addr
& 1);
786 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, thumb
));
788 tcg_gen_movi_i32(tmp
, addr
& ~1);
789 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, regs
[15]));
/* Set PC and Thumb state from var. var is marked as dead. */
794 static inline void gen_bx(DisasContext
*s
, TCGv var
)
798 s
->is_jmp
= DISAS_UPDATE
;
800 tcg_gen_andi_i32(tmp
, var
, 1);
801 store_cpu_field(tmp
, thumb
);
802 tcg_gen_andi_i32(var
, var
, ~1);
803 store_cpu_field(var
, regs
[15]);
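/*
 * Note: on ARM the low bit of an interworking branch target selects the
 * instruction set, which is why gen_bx() copies bit 0 of the address into
 * the Thumb state flag and clears it before storing the new PC.
 */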
/* TODO: This should be removed. Use gen_bx instead. */
807 static inline void gen_bx_T0(DisasContext
*s
)
809 TCGv tmp
= new_tmp();
810 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
814 static inline TCGv
gen_ld8s(TCGv addr
, int index
)
816 TCGv tmp
= new_tmp();
817 tcg_gen_qemu_ld8s(tmp
, addr
, index
);
820 static inline TCGv
gen_ld8u(TCGv addr
, int index
)
822 TCGv tmp
= new_tmp();
823 tcg_gen_qemu_ld8u(tmp
, addr
, index
);
826 static inline TCGv
gen_ld16s(TCGv addr
, int index
)
828 TCGv tmp
= new_tmp();
829 tcg_gen_qemu_ld16s(tmp
, addr
, index
);
832 static inline TCGv
gen_ld16u(TCGv addr
, int index
)
834 TCGv tmp
= new_tmp();
835 tcg_gen_qemu_ld16u(tmp
, addr
, index
);
838 static inline TCGv
gen_ld32(TCGv addr
, int index
)
840 TCGv tmp
= new_tmp();
841 tcg_gen_qemu_ld32u(tmp
, addr
, index
);
844 static inline void gen_st8(TCGv val
, TCGv addr
, int index
)
846 tcg_gen_qemu_st8(val
, addr
, index
);
849 static inline void gen_st16(TCGv val
, TCGv addr
, int index
)
851 tcg_gen_qemu_st16(val
, addr
, index
);
854 static inline void gen_st32(TCGv val
, TCGv addr
, int index
)
856 tcg_gen_qemu_st32(val
, addr
, index
);
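/*
 * The "index" argument to the load/store helpers above is the TCG memory
 * index passed through to tcg_gen_qemu_ld/st; in this file it is normally
 * IS_USER(s), selecting the user or privileged address space for the access.
 */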
860 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
862 load_reg_var(s
, cpu_T
[0], reg
);
865 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
867 load_reg_var(s
, cpu_T
[1], reg
);
870 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
872 load_reg_var(s
, cpu_T
[2], reg
);
875 static inline void gen_set_pc_im(uint32_t val
)
877 TCGv tmp
= new_tmp();
878 tcg_gen_movi_i32(tmp
, val
);
879 store_cpu_field(tmp
, regs
[15]);
882 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
887 tcg_gen_andi_i32(tmp
, cpu_T
[t
], ~1);
891 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, regs
[reg
]));
894 s
->is_jmp
= DISAS_JUMP
;
898 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
900 gen_movl_reg_TN(s
, reg
, 0);
903 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
905 gen_movl_reg_TN(s
, reg
, 1);
/* Force a TB lookup after an instruction that changes the CPU state. */
909 static inline void gen_lookup_tb(DisasContext
*s
)
911 gen_op_movl_T0_im(s
->pc
);
912 gen_movl_reg_T0(s
, 15);
913 s
->is_jmp
= DISAS_UPDATE
;
916 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
919 int val
, rm
, shift
, shiftop
;
922 if (!(insn
& (1 << 25))) {
925 if (!(insn
& (1 << 23)))
928 tcg_gen_addi_i32(var
, var
, val
);
932 shift
= (insn
>> 7) & 0x1f;
933 shiftop
= (insn
>> 5) & 3;
934 offset
= load_reg(s
, rm
);
935 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
936 if (!(insn
& (1 << 23)))
937 tcg_gen_sub_i32(var
, var
, offset
);
939 tcg_gen_add_i32(var
, var
, offset
);
944 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
950 if (insn
& (1 << 22)) {
952 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
953 if (!(insn
& (1 << 23)))
957 tcg_gen_addi_i32(var
, var
, val
);
961 tcg_gen_addi_i32(var
, var
, extra
);
963 offset
= load_reg(s
, rm
);
964 if (!(insn
& (1 << 23)))
965 tcg_gen_sub_i32(var
, var
, offset
);
967 tcg_gen_add_i32(var
, var
, offset
);
972 #define VFP_OP2(name) \
973 static inline void gen_vfp_##name(int dp) \
976 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
978 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
988 static inline void gen_vfp_abs(int dp
)
991 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
993 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
996 static inline void gen_vfp_neg(int dp
)
999 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1001 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1004 static inline void gen_vfp_sqrt(int dp
)
1007 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1009 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1012 static inline void gen_vfp_cmp(int dp
)
1015 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1017 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1020 static inline void gen_vfp_cmpe(int dp
)
1023 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1025 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1028 static inline void gen_vfp_F1_ld0(int dp
)
1031 tcg_gen_movi_i64(cpu_F1d
, 0);
1033 tcg_gen_movi_i32(cpu_F1s
, 0);
1036 static inline void gen_vfp_uito(int dp
)
1039 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
1041 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
1044 static inline void gen_vfp_sito(int dp
)
1047 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
1049 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
1052 static inline void gen_vfp_toui(int dp
)
1055 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
1057 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
1060 static inline void gen_vfp_touiz(int dp
)
1063 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
1065 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1068 static inline void gen_vfp_tosi(int dp
)
1071 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
1073 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
1076 static inline void gen_vfp_tosiz(int dp
)
1079 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
1081 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1084 #define VFP_GEN_FIX(name) \
1085 static inline void gen_vfp_##name(int dp, int shift) \
1088 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1090 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
1102 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
1105 tcg_gen_qemu_ld64(cpu_F0d
, cpu_T
[1], IS_USER(s
));
1107 tcg_gen_qemu_ld32u(cpu_F0s
, cpu_T
[1], IS_USER(s
));
1110 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
1113 tcg_gen_qemu_st64(cpu_F0d
, cpu_T
[1], IS_USER(s
));
1115 tcg_gen_qemu_st32(cpu_F0s
, cpu_T
[1], IS_USER(s
));
1119 vfp_reg_offset (int dp
, int reg
)
1122 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1124 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1125 + offsetof(CPU_DoubleU
, l
.upper
);
1127 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1128 + offsetof(CPU_DoubleU
, l
.lower
);
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
1135 neon_reg_offset (int reg
, int n
)
1139 return vfp_reg_offset(0, sreg
);
/* FIXME: Remove these. */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1150 static TCGv
neon_load_reg(int reg
, int pass
)
1152 TCGv tmp
= new_tmp();
1153 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1157 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1159 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1163 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1165 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1168 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1170 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1178 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1181 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1183 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1186 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1189 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1191 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1194 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1197 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1199 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
#define ARM_CP_RW_BIT (1 << 20)
1204 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1206 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1209 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1211 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1214 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg
)
1216 tcg_gen_st_i32(cpu_T
[0], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1219 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg
)
1221 tcg_gen_ld_i32(cpu_T
[0], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1224 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg
)
1226 tcg_gen_ld_i32(cpu_T
[1], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1229 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1231 iwmmxt_store_reg(cpu_M0
, rn
);
1234 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1236 iwmmxt_load_reg(cpu_M0
, rn
);
1239 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1241 iwmmxt_load_reg(cpu_V1
, rn
);
1242 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1245 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1247 iwmmxt_load_reg(cpu_V1
, rn
);
1248 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1251 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1253 iwmmxt_load_reg(cpu_V1
, rn
);
1254 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1257 #define IWMMXT_OP(name) \
1258 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1260 iwmmxt_load_reg(cpu_V1, rn); \
1261 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1264 #define IWMMXT_OP_ENV(name) \
1265 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1267 iwmmxt_load_reg(cpu_V1, rn); \
1268 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1271 #define IWMMXT_OP_ENV_SIZE(name) \
1272 IWMMXT_OP_ENV(name##b) \
1273 IWMMXT_OP_ENV(name##w) \
1274 IWMMXT_OP_ENV(name##l)
1276 #define IWMMXT_OP_ENV1(name) \
1277 static inline void gen_op_iwmmxt_##name##_M0(void) \
1279 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1293 IWMMXT_OP_ENV_SIZE(unpackl
)
1294 IWMMXT_OP_ENV_SIZE(unpackh
)
1296 IWMMXT_OP_ENV1(unpacklub
)
1297 IWMMXT_OP_ENV1(unpackluw
)
1298 IWMMXT_OP_ENV1(unpacklul
)
1299 IWMMXT_OP_ENV1(unpackhub
)
1300 IWMMXT_OP_ENV1(unpackhuw
)
1301 IWMMXT_OP_ENV1(unpackhul
)
1302 IWMMXT_OP_ENV1(unpacklsb
)
1303 IWMMXT_OP_ENV1(unpacklsw
)
1304 IWMMXT_OP_ENV1(unpacklsl
)
1305 IWMMXT_OP_ENV1(unpackhsb
)
1306 IWMMXT_OP_ENV1(unpackhsw
)
1307 IWMMXT_OP_ENV1(unpackhsl
)
1309 IWMMXT_OP_ENV_SIZE(cmpeq
)
1310 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1311 IWMMXT_OP_ENV_SIZE(cmpgts
)
1313 IWMMXT_OP_ENV_SIZE(mins
)
1314 IWMMXT_OP_ENV_SIZE(minu
)
1315 IWMMXT_OP_ENV_SIZE(maxs
)
1316 IWMMXT_OP_ENV_SIZE(maxu
)
1318 IWMMXT_OP_ENV_SIZE(subn
)
1319 IWMMXT_OP_ENV_SIZE(addn
)
1320 IWMMXT_OP_ENV_SIZE(subu
)
1321 IWMMXT_OP_ENV_SIZE(addu
)
1322 IWMMXT_OP_ENV_SIZE(subs
)
1323 IWMMXT_OP_ENV_SIZE(adds
)
1325 IWMMXT_OP_ENV(avgb0
)
1326 IWMMXT_OP_ENV(avgb1
)
1327 IWMMXT_OP_ENV(avgw0
)
1328 IWMMXT_OP_ENV(avgw1
)
1332 IWMMXT_OP_ENV(packuw
)
1333 IWMMXT_OP_ENV(packul
)
1334 IWMMXT_OP_ENV(packuq
)
1335 IWMMXT_OP_ENV(packsw
)
1336 IWMMXT_OP_ENV(packsl
)
1337 IWMMXT_OP_ENV(packsq
)
1339 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1341 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1344 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1346 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1349 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1351 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1354 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn
)
1356 iwmmxt_load_reg(cpu_V1
, rn
);
1357 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, cpu_T
[0]);
1360 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift
)
1362 TCGv tmp
= tcg_const_i32(shift
);
1363 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1], tmp
);
1366 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift
)
1368 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1369 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1370 tcg_gen_ext8s_i32(cpu_T
[0], cpu_T
[0]);
1373 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift
)
1375 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1376 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1377 tcg_gen_ext16s_i32(cpu_T
[0], cpu_T
[0]);
1380 static inline void gen_op_iwmmxt_extru_T0_M0(int shift
, uint32_t mask
)
1382 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1383 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1385 tcg_gen_andi_i32(cpu_T
[0], cpu_T
[0], mask
);
1388 static void gen_op_iwmmxt_set_mup(void)
1391 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1392 tcg_gen_ori_i32(tmp
, tmp
, 2);
1393 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1396 static void gen_op_iwmmxt_set_cup(void)
1399 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1400 tcg_gen_ori_i32(tmp
, tmp
, 1);
1401 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1404 static void gen_op_iwmmxt_setpsr_nz(void)
1406 TCGv tmp
= new_tmp();
1407 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1408 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1411 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1413 iwmmxt_load_reg(cpu_V1
, rn
);
1414 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1415 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1419 static void gen_iwmmxt_movl_T0_T1_wRn(int rn
)
1421 iwmmxt_load_reg(cpu_V0
, rn
);
1422 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_V0
);
1423 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1424 tcg_gen_trunc_i64_i32(cpu_T
[1], cpu_V0
);
1427 static void gen_iwmmxt_movl_wRn_T0_T1(int rn
)
1429 tcg_gen_concat_i32_i64(cpu_V0
, cpu_T
[0], cpu_T
[1]);
1430 iwmmxt_store_reg(cpu_V0
, rn
);
1433 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
)
1438 rd
= (insn
>> 16) & 0xf;
1439 gen_movl_T1_reg(s
, rd
);
1441 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1442 if (insn
& (1 << 24)) {
1444 if (insn
& (1 << 23))
1445 gen_op_addl_T1_im(offset
);
1447 gen_op_addl_T1_im(-offset
);
1449 if (insn
& (1 << 21))
1450 gen_movl_reg_T1(s
, rd
);
1451 } else if (insn
& (1 << 21)) {
1453 if (insn
& (1 << 23))
1454 gen_op_movl_T0_im(offset
);
1456 gen_op_movl_T0_im(- offset
);
1457 gen_op_addl_T0_T1();
1458 gen_movl_reg_T0(s
, rd
);
1459 } else if (!(insn
& (1 << 23)))
1464 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
)
1466 int rd
= (insn
>> 0) & 0xf;
1468 if (insn
& (1 << 8))
1469 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
)
1472 gen_op_iwmmxt_movl_T0_wCx(rd
);
1474 gen_iwmmxt_movl_T0_T1_wRn(rd
);
1476 gen_op_movl_T1_im(mask
);
1477 gen_op_andl_T0_T1();
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
1483 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1486 int rdhi
, rdlo
, rd0
, rd1
, i
;
1489 if ((insn
& 0x0e000e00) == 0x0c000000) {
1490 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1492 rdlo
= (insn
>> 12) & 0xf;
1493 rdhi
= (insn
>> 16) & 0xf;
1494 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1495 gen_iwmmxt_movl_T0_T1_wRn(wrd
);
1496 gen_movl_reg_T0(s
, rdlo
);
1497 gen_movl_reg_T1(s
, rdhi
);
1498 } else { /* TMCRR */
1499 gen_movl_T0_reg(s
, rdlo
);
1500 gen_movl_T1_reg(s
, rdhi
);
1501 gen_iwmmxt_movl_wRn_T0_T1(wrd
);
1502 gen_op_iwmmxt_set_mup();
1507 wrd
= (insn
>> 12) & 0xf;
1508 if (gen_iwmmxt_address(s
, insn
))
1510 if (insn
& ARM_CP_RW_BIT
) {
1511 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1512 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
1513 tcg_gen_mov_i32(cpu_T
[0], tmp
);
1515 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1518 if (insn
& (1 << 8)) {
1519 if (insn
& (1 << 22)) { /* WLDRD */
1520 tcg_gen_qemu_ld64(cpu_M0
, cpu_T
[1], IS_USER(s
));
1522 } else { /* WLDRW wRd */
1523 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
1526 if (insn
& (1 << 22)) { /* WLDRH */
1527 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
1528 } else { /* WLDRB */
1529 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
1533 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1536 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1539 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1540 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1542 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
1543 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
1545 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1547 if (insn
& (1 << 8)) {
1548 if (insn
& (1 << 22)) { /* WSTRD */
1550 tcg_gen_qemu_st64(cpu_M0
, cpu_T
[1], IS_USER(s
));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1553 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
1556 if (insn
& (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1558 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1561 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
1569 if ((insn
& 0x0f000000) != 0x0e000000)
1572 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd
= (insn
>> 12) & 0xf;
1575 rd0
= (insn
>> 0) & 0xf;
1576 rd1
= (insn
>> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1584 case 0x011: /* TMCR */
1587 rd
= (insn
>> 12) & 0xf;
1588 wrd
= (insn
>> 16) & 0xf;
1590 case ARM_IWMMXT_wCID
:
1591 case ARM_IWMMXT_wCASF
:
1593 case ARM_IWMMXT_wCon
:
1594 gen_op_iwmmxt_set_cup();
1596 case ARM_IWMMXT_wCSSF
:
1597 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1598 gen_movl_T1_reg(s
, rd
);
1599 gen_op_bicl_T0_T1();
1600 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1602 case ARM_IWMMXT_wCGR0
:
1603 case ARM_IWMMXT_wCGR1
:
1604 case ARM_IWMMXT_wCGR2
:
1605 case ARM_IWMMXT_wCGR3
:
1606 gen_op_iwmmxt_set_cup();
1607 gen_movl_reg_T0(s
, rd
);
1608 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1614 case 0x100: /* WXOR */
1615 wrd
= (insn
>> 12) & 0xf;
1616 rd0
= (insn
>> 0) & 0xf;
1617 rd1
= (insn
>> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1625 case 0x111: /* TMRC */
1628 rd
= (insn
>> 12) & 0xf;
1629 wrd
= (insn
>> 16) & 0xf;
1630 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1631 gen_movl_reg_T0(s
, rd
);
1633 case 0x300: /* WANDN */
1634 wrd
= (insn
>> 12) & 0xf;
1635 rd0
= (insn
>> 0) & 0xf;
1636 rd1
= (insn
>> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1638 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1639 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1645 case 0x200: /* WAND */
1646 wrd
= (insn
>> 12) & 0xf;
1647 rd0
= (insn
>> 0) & 0xf;
1648 rd1
= (insn
>> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd
= (insn
>> 12) & 0xf;
1658 rd0
= (insn
>> 0) & 0xf;
1659 rd1
= (insn
>> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1661 if (insn
& (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1666 gen_op_iwmmxt_set_mup();
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd
= (insn
>> 12) & 0xf;
1670 rd0
= (insn
>> 16) & 0xf;
1671 rd1
= (insn
>> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1673 switch ((insn
>> 22) & 3) {
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1686 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd
= (insn
>> 12) & 0xf;
1692 rd0
= (insn
>> 16) & 0xf;
1693 rd1
= (insn
>> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1695 switch ((insn
>> 22) & 3) {
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1708 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd
= (insn
>> 12) & 0xf;
1714 rd0
= (insn
>> 16) & 0xf;
1715 rd1
= (insn
>> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1717 if (insn
& (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1721 if (!(insn
& (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1724 gen_op_iwmmxt_set_mup();
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd
= (insn
>> 12) & 0xf;
1728 rd0
= (insn
>> 16) & 0xf;
1729 rd1
= (insn
>> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1731 if (insn
& (1 << 21)) {
1732 if (insn
& (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1737 if (insn
& (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1742 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1743 gen_op_iwmmxt_set_mup();
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd
= (insn
>> 12) & 0xf;
1747 rd0
= (insn
>> 16) & 0xf;
1748 rd1
= (insn
>> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1750 if (insn
& (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1754 if (!(insn
& (1 << 20))) {
1755 iwmmxt_load_reg(cpu_V1
, wrd
);
1756 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1758 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1759 gen_op_iwmmxt_set_mup();
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd
= (insn
>> 12) & 0xf;
1763 rd0
= (insn
>> 16) & 0xf;
1764 rd1
= (insn
>> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1766 switch ((insn
>> 22) & 3) {
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1779 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd
= (insn
>> 12) & 0xf;
1785 rd0
= (insn
>> 16) & 0xf;
1786 rd1
= (insn
>> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1788 if (insn
& (1 << 22)) {
1789 if (insn
& (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1794 if (insn
& (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1799 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd
= (insn
>> 12) & 0xf;
1805 rd0
= (insn
>> 16) & 0xf;
1806 rd1
= (insn
>> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1808 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1809 gen_op_movl_T1_im(7);
1810 gen_op_andl_T0_T1();
1811 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1813 gen_op_iwmmxt_set_mup();
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 rd
= (insn
>> 12) & 0xf;
1817 wrd
= (insn
>> 16) & 0xf;
1818 gen_movl_T0_reg(s
, rd
);
1819 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1820 switch ((insn
>> 6) & 3) {
1822 gen_op_movl_T1_im(0xff);
1823 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 7) << 3);
1826 gen_op_movl_T1_im(0xffff);
1827 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 3) << 4);
1830 gen_op_movl_T1_im(0xffffffff);
1831 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 1) << 5);
1836 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1837 gen_op_iwmmxt_set_mup();
1839 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1840 rd
= (insn
>> 12) & 0xf;
1841 wrd
= (insn
>> 16) & 0xf;
1844 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1845 switch ((insn
>> 22) & 3) {
1848 gen_op_iwmmxt_extrsb_T0_M0((insn
& 7) << 3);
1850 gen_op_iwmmxt_extru_T0_M0((insn
& 7) << 3, 0xff);
1855 gen_op_iwmmxt_extrsw_T0_M0((insn
& 3) << 4);
1857 gen_op_iwmmxt_extru_T0_M0((insn
& 3) << 4, 0xffff);
1861 gen_op_iwmmxt_extru_T0_M0((insn
& 1) << 5, ~0u);
1866 gen_movl_reg_T0(s
, rd
);
1868 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1869 if ((insn
& 0x000ff008) != 0x0003f000)
1871 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1872 switch ((insn
>> 22) & 3) {
1874 gen_op_shrl_T1_im(((insn
& 7) << 2) + 0);
1877 gen_op_shrl_T1_im(((insn
& 3) << 3) + 4);
1880 gen_op_shrl_T1_im(((insn
& 1) << 4) + 12);
1885 gen_op_shll_T1_im(28);
1886 gen_set_nzcv(cpu_T
[1]);
1888 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1889 rd
= (insn
>> 12) & 0xf;
1890 wrd
= (insn
>> 16) & 0xf;
1891 gen_movl_T0_reg(s
, rd
);
1892 switch ((insn
>> 6) & 3) {
1894 gen_helper_iwmmxt_bcstb(cpu_M0
, cpu_T
[0]);
1897 gen_helper_iwmmxt_bcstw(cpu_M0
, cpu_T
[0]);
1900 gen_helper_iwmmxt_bcstl(cpu_M0
, cpu_T
[0]);
1905 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1906 gen_op_iwmmxt_set_mup();
1908 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1909 if ((insn
& 0x000ff00f) != 0x0003f000)
1911 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1912 switch ((insn
>> 22) & 3) {
1914 for (i
= 0; i
< 7; i
++) {
1915 gen_op_shll_T1_im(4);
1916 gen_op_andl_T0_T1();
1920 for (i
= 0; i
< 3; i
++) {
1921 gen_op_shll_T1_im(8);
1922 gen_op_andl_T0_T1();
1926 gen_op_shll_T1_im(16);
1927 gen_op_andl_T0_T1();
1932 gen_set_nzcv(cpu_T
[0]);
1934 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1935 wrd
= (insn
>> 12) & 0xf;
1936 rd0
= (insn
>> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1938 switch ((insn
>> 22) & 3) {
1940 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1943 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1946 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1951 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1952 gen_op_iwmmxt_set_mup();
1954 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1955 if ((insn
& 0x000ff00f) != 0x0003f000)
1957 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1958 switch ((insn
>> 22) & 3) {
1960 for (i
= 0; i
< 7; i
++) {
1961 gen_op_shll_T1_im(4);
1966 for (i
= 0; i
< 3; i
++) {
1967 gen_op_shll_T1_im(8);
1972 gen_op_shll_T1_im(16);
1978 gen_set_nzcv(cpu_T
[0]);
1980 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1981 rd
= (insn
>> 12) & 0xf;
1982 rd0
= (insn
>> 16) & 0xf;
1983 if ((insn
& 0xf) != 0)
1985 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1986 switch ((insn
>> 22) & 3) {
1988 gen_helper_iwmmxt_msbb(cpu_T
[0], cpu_M0
);
1991 gen_helper_iwmmxt_msbw(cpu_T
[0], cpu_M0
);
1994 gen_helper_iwmmxt_msbl(cpu_T
[0], cpu_M0
);
1999 gen_movl_reg_T0(s
, rd
);
2001 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2002 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2003 wrd
= (insn
>> 12) & 0xf;
2004 rd0
= (insn
>> 16) & 0xf;
2005 rd1
= (insn
>> 0) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2007 switch ((insn
>> 22) & 3) {
2009 if (insn
& (1 << 21))
2010 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2012 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2015 if (insn
& (1 << 21))
2016 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2018 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2021 if (insn
& (1 << 21))
2022 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2024 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2029 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2033 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2034 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2035 wrd
= (insn
>> 12) & 0xf;
2036 rd0
= (insn
>> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2038 switch ((insn
>> 22) & 3) {
2040 if (insn
& (1 << 21))
2041 gen_op_iwmmxt_unpacklsb_M0();
2043 gen_op_iwmmxt_unpacklub_M0();
2046 if (insn
& (1 << 21))
2047 gen_op_iwmmxt_unpacklsw_M0();
2049 gen_op_iwmmxt_unpackluw_M0();
2052 if (insn
& (1 << 21))
2053 gen_op_iwmmxt_unpacklsl_M0();
2055 gen_op_iwmmxt_unpacklul_M0();
2060 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2064 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2065 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2066 wrd
= (insn
>> 12) & 0xf;
2067 rd0
= (insn
>> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2069 switch ((insn
>> 22) & 3) {
2071 if (insn
& (1 << 21))
2072 gen_op_iwmmxt_unpackhsb_M0();
2074 gen_op_iwmmxt_unpackhub_M0();
2077 if (insn
& (1 << 21))
2078 gen_op_iwmmxt_unpackhsw_M0();
2080 gen_op_iwmmxt_unpackhuw_M0();
2083 if (insn
& (1 << 21))
2084 gen_op_iwmmxt_unpackhsl_M0();
2086 gen_op_iwmmxt_unpackhul_M0();
2091 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2092 gen_op_iwmmxt_set_mup();
2093 gen_op_iwmmxt_set_cup();
2095 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2096 case 0x214: case 0x614: case 0xa14: case 0xe14:
2097 wrd
= (insn
>> 12) & 0xf;
2098 rd0
= (insn
>> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2100 if (gen_iwmmxt_shift(insn
, 0xff))
2102 switch ((insn
>> 22) & 3) {
2106 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2109 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2112 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2119 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2120 case 0x014: case 0x414: case 0x814: case 0xc14:
2121 wrd
= (insn
>> 12) & 0xf;
2122 rd0
= (insn
>> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2124 if (gen_iwmmxt_shift(insn
, 0xff))
2126 switch ((insn
>> 22) & 3) {
2130 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2133 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2136 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2139 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2143 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2144 case 0x114: case 0x514: case 0x914: case 0xd14:
2145 wrd
= (insn
>> 12) & 0xf;
2146 rd0
= (insn
>> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2148 if (gen_iwmmxt_shift(insn
, 0xff))
2150 switch ((insn
>> 22) & 3) {
2154 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2157 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2160 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2167 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2168 case 0x314: case 0x714: case 0xb14: case 0xf14:
2169 wrd
= (insn
>> 12) & 0xf;
2170 rd0
= (insn
>> 16) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2172 switch ((insn
>> 22) & 3) {
2176 if (gen_iwmmxt_shift(insn
, 0xf))
2178 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2181 if (gen_iwmmxt_shift(insn
, 0x1f))
2183 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2186 if (gen_iwmmxt_shift(insn
, 0x3f))
2188 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2192 gen_op_iwmmxt_set_mup();
2193 gen_op_iwmmxt_set_cup();
2195 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2196 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2197 wrd
= (insn
>> 12) & 0xf;
2198 rd0
= (insn
>> 16) & 0xf;
2199 rd1
= (insn
>> 0) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2201 switch ((insn
>> 22) & 3) {
2203 if (insn
& (1 << 21))
2204 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2206 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2209 if (insn
& (1 << 21))
2210 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2212 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2215 if (insn
& (1 << 21))
2216 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2218 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2223 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2224 gen_op_iwmmxt_set_mup();
2226 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2227 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2228 wrd
= (insn
>> 12) & 0xf;
2229 rd0
= (insn
>> 16) & 0xf;
2230 rd1
= (insn
>> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2232 switch ((insn
>> 22) & 3) {
2234 if (insn
& (1 << 21))
2235 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2237 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2240 if (insn
& (1 << 21))
2241 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2243 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2246 if (insn
& (1 << 21))
2247 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2249 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2255 gen_op_iwmmxt_set_mup();
2257 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2258 case 0x402: case 0x502: case 0x602: case 0x702:
2259 wrd
= (insn
>> 12) & 0xf;
2260 rd0
= (insn
>> 16) & 0xf;
2261 rd1
= (insn
>> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2263 gen_op_movl_T0_im((insn
>> 20) & 3);
2264 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2266 gen_op_iwmmxt_set_mup();
2268 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2269 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2270 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2271 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2272 wrd
= (insn
>> 12) & 0xf;
2273 rd0
= (insn
>> 16) & 0xf;
2274 rd1
= (insn
>> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2276 switch ((insn
>> 20) & 0xf) {
2278 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2281 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2284 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2287 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2290 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2293 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2296 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2299 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2302 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2307 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2308 gen_op_iwmmxt_set_mup();
2309 gen_op_iwmmxt_set_cup();
2311 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2312 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2313 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2314 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2315 wrd
= (insn
>> 12) & 0xf;
2316 rd0
= (insn
>> 16) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2318 gen_op_movl_T0_im(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2319 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2320 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd
= (insn
>> 12) & 0xf;
2329 rd0
= (insn
>> 16) & 0xf;
2330 rd1
= (insn
>> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2332 switch ((insn
>> 20) & 0xf) {
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2337 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2355 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2363 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 wrd
= (insn
>> 12) & 0xf;
2372 rd0
= (insn
>> 16) & 0xf;
2373 rd1
= (insn
>> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2375 if (!(insn
& (1 << 20)))
2377 switch ((insn
>> 22) & 3) {
2381 if (insn
& (1 << 21))
2382 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2384 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2387 if (insn
& (1 << 21))
2388 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2390 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2393 if (insn
& (1 << 21))
2394 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2396 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2399 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2400 gen_op_iwmmxt_set_mup();
2401 gen_op_iwmmxt_set_cup();
2403 case 0x201: case 0x203: case 0x205: case 0x207:
2404 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2405 case 0x211: case 0x213: case 0x215: case 0x217:
2406 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2407 wrd
= (insn
>> 5) & 0xf;
2408 rd0
= (insn
>> 12) & 0xf;
2409 rd1
= (insn
>> 0) & 0xf;
2410 if (rd0
== 0xf || rd1
== 0xf)
2412 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2413 switch ((insn
>> 16) & 0xf) {
2414 case 0x0: /* TMIA */
2415 gen_movl_T0_reg(s
, rd0
);
2416 gen_movl_T1_reg(s
, rd1
);
2417 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2419 case 0x8: /* TMIAPH */
2420 gen_movl_T0_reg(s
, rd0
);
2421 gen_movl_T1_reg(s
, rd1
);
2422 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2425 gen_movl_T1_reg(s
, rd0
);
2426 if (insn
& (1 << 16))
2427 gen_op_shrl_T1_im(16);
2428 gen_op_movl_T0_T1();
2429 gen_movl_T1_reg(s
, rd1
);
2430 if (insn
& (1 << 17))
2431 gen_op_shrl_T1_im(16);
2432 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2437 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2438 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
2449 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2451 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2453 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2454 /* Multiply with Internal Accumulate Format */
2455 rd0
= (insn
>> 12) & 0xf;
2457 acc
= (insn
>> 5) & 7;
2462 switch ((insn
>> 16) & 0xf) {
2464 gen_movl_T0_reg(s
, rd0
);
2465 gen_movl_T1_reg(s
, rd1
);
2466 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2468 case 0x8: /* MIAPH */
2469 gen_movl_T0_reg(s
, rd0
);
2470 gen_movl_T1_reg(s
, rd1
);
2471 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
2477 gen_movl_T1_reg(s
, rd0
);
2478 if (insn
& (1 << 16))
2479 gen_op_shrl_T1_im(16);
2480 gen_op_movl_T0_T1();
2481 gen_movl_T1_reg(s
, rd1
);
2482 if (insn
& (1 << 17))
2483 gen_op_shrl_T1_im(16);
2484 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2490 gen_op_iwmmxt_movq_wRn_M0(acc
);
2494 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi
= (insn
>> 16) & 0xf;
2497 rdlo
= (insn
>> 12) & 0xf;
2503 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2504 gen_iwmmxt_movl_T0_T1_wRn(acc
);
2505 gen_movl_reg_T0(s
, rdlo
);
2506 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2507 gen_op_andl_T0_T1();
2508 gen_movl_reg_T0(s
, rdhi
);
2510 gen_movl_T0_reg(s
, rdlo
);
2511 gen_movl_T1_reg(s
, rdhi
);
2512 gen_iwmmxt_movl_wRn_T0_T1(acc
);
/* Disassemble system coprocessor instruction. Return nonzero if
   instruction is not defined. */
2522 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2525 uint32_t rd
= (insn
>> 12) & 0xf;
2526 uint32_t cp
= (insn
>> 8) & 0xf;
2531 if (insn
& ARM_CP_RW_BIT
) {
2532 if (!env
->cp
[cp
].cp_read
)
2534 gen_set_pc_im(s
->pc
);
2536 gen_helper_get_cp(tmp
, cpu_env
, tcg_const_i32(insn
));
2537 store_reg(s
, rd
, tmp
);
2539 if (!env
->cp
[cp
].cp_write
)
2541 gen_set_pc_im(s
->pc
);
2542 tmp
= load_reg(s
, rd
);
2543 gen_helper_set_cp(cpu_env
, tcg_const_i32(insn
), tmp
);
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
            || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
            (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
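/* Worked example (illustrative, not part of the original source): for the
   destination of a double-precision instruction, VFP_DREG_D(reg, insn) takes
   the four bits at insn[15:12] as the low part of the register number and,
   on VFP3 cores, insn[22] as bit 4, roughly

       reg = ((insn >> 12) & 0x0f) | ((insn >> 18) & 0x10);

   so insn[15:12] = 0b0011 with bit 22 set names d19.  Pre-VFP3 cores only
   have d0-d15, which is why the non-VFP3 branch rejects a set "smallbit".  */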
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}

static inline int
vfp_enabled(CPUState * env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
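/* Illustrative examples (not part of the original source): these helpers
   replicate a narrow value across a 32-bit word, which is what a VDUP of a
   byte or halfword scalar needs per pass:

       gen_neon_dup_u8(var, 0):   0x000000ab -> 0xabababab
       gen_neon_dup_low16(var):   0x0000abcd -> 0xabcdabcd
       gen_neon_dup_high16(var):  0xabcd0000 -> 0xabcdabcd                  */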
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
2720 if (insn
& (1 << 4)) {
2721 /* single register transfer */
2722 rd
= (insn
>> 12) & 0xf;
2727 VFP_DREG_N(rn
, insn
);
2730 if (insn
& 0x00c00060
2731 && !arm_feature(env
, ARM_FEATURE_NEON
))
2734 pass
= (insn
>> 21) & 1;
2735 if (insn
& (1 << 22)) {
2737 offset
= ((insn
>> 5) & 3) * 8;
2738 } else if (insn
& (1 << 5)) {
2740 offset
= (insn
& (1 << 6)) ? 16 : 0;
2745 if (insn
& ARM_CP_RW_BIT
) {
2747 tmp
= neon_load_reg(rn
, pass
);
2751 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2752 if (insn
& (1 << 23))
2758 if (insn
& (1 << 23)) {
2760 tcg_gen_shri_i32(tmp
, tmp
, 16);
2766 tcg_gen_sari_i32(tmp
, tmp
, 16);
2775 store_reg(s
, rd
, tmp
);
2778 tmp
= load_reg(s
, rd
);
2779 if (insn
& (1 << 23)) {
2782 gen_neon_dup_u8(tmp
, 0);
2783 } else if (size
== 1) {
2784 gen_neon_dup_low16(tmp
);
2787 tcg_gen_mov_i32(tmp2
, tmp
);
2788 neon_store_reg(rn
, 0, tmp2
);
2789 neon_store_reg(rn
, 1, tmp
);
2794 tmp2
= neon_load_reg(rn
, pass
);
2795 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2799 tmp2
= neon_load_reg(rn
, pass
);
2800 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2806 neon_store_reg(rn
, pass
, tmp
);
2810 if ((insn
& 0x6f) != 0x00)
2812 rn
= VFP_SREG_N(insn
);
2813 if (insn
& ARM_CP_RW_BIT
) {
2815 if (insn
& (1 << 21)) {
2816 /* system register */
2821 /* VFP2 allows access to FSID from userspace.
2822 VFP3 restricts all id registers to privileged
2825 && arm_feature(env
, ARM_FEATURE_VFP3
))
2827 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2832 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2834 case ARM_VFP_FPINST
:
2835 case ARM_VFP_FPINST2
:
2836 /* Not present in VFP3. */
2838 || arm_feature(env
, ARM_FEATURE_VFP3
))
2840 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2844 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2845 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2848 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2854 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2856 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2862 gen_mov_F0_vreg(0, rn
);
2863 tmp
= gen_vfp_mrs();
2866 /* Set the 4 flag bits in the CPSR. */
2870 store_reg(s
, rd
, tmp
);
2874 tmp
= load_reg(s
, rd
);
2875 if (insn
& (1 << 21)) {
2877 /* system register */
2882 /* Writes are ignored. */
2885 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2892 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2895 case ARM_VFP_FPINST
:
2896 case ARM_VFP_FPINST2
:
2897 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2904 gen_mov_vreg_F0(0, rn
);
2909 /* data processing */
2910 /* The opcode is in bits 23, 21, 20 and 6. */
2911 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2915 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2917 /* rn is register number */
2918 VFP_DREG_N(rn
, insn
);
2921 if (op
== 15 && (rn
== 15 || rn
> 17)) {
2922 /* Integer or single precision destination. */
2923 rd
= VFP_SREG_D(insn
);
2925 VFP_DREG_D(rd
, insn
);
2928 if (op
== 15 && (rn
== 16 || rn
== 17)) {
2929 /* Integer source. */
2930 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2932 VFP_DREG_M(rm
, insn
);
2935 rn
= VFP_SREG_N(insn
);
2936 if (op
== 15 && rn
== 15) {
2937 /* Double precision destination. */
2938 VFP_DREG_D(rd
, insn
);
2940 rd
= VFP_SREG_D(insn
);
2942 rm
= VFP_SREG_M(insn
);
2945 veclen
= env
->vfp
.vec_len
;
2946 if (op
== 15 && rn
> 3)
2949 /* Shut up compiler warnings. */
2960 /* Figure out what type of vector operation this is. */
2961 if ((rd
& bank_mask
) == 0) {
2966 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
2968 delta_d
= env
->vfp
.vec_stride
+ 1;
2970 if ((rm
& bank_mask
) == 0) {
2971 /* mixed scalar/vector */
2980 /* Load the initial operands. */
2985 /* Integer source */
2986 gen_mov_F0_vreg(0, rm
);
2991 gen_mov_F0_vreg(dp
, rd
);
2992 gen_mov_F1_vreg(dp
, rm
);
2996 /* Compare with zero */
2997 gen_mov_F0_vreg(dp
, rd
);
3004 /* Source and destination the same. */
3005 gen_mov_F0_vreg(dp
, rd
);
3008 /* One source operand. */
3009 gen_mov_F0_vreg(dp
, rm
);
3013 /* Two source operands. */
3014 gen_mov_F0_vreg(dp
, rn
);
3015 gen_mov_F1_vreg(dp
, rm
);
3019 /* Perform the calculation. */
3021 case 0: /* mac: fd + (fn * fm) */
3023 gen_mov_F1_vreg(dp
, rd
);
3026 case 1: /* nmac: fd - (fn * fm) */
3029 gen_mov_F1_vreg(dp
, rd
);
3032 case 2: /* msc: -fd + (fn * fm) */
3034 gen_mov_F1_vreg(dp
, rd
);
3037 case 3: /* nmsc: -fd - (fn * fm) */
3040 gen_mov_F1_vreg(dp
, rd
);
3043 case 4: /* mul: fn * fm */
3046 case 5: /* nmul: -(fn * fm) */
3050 case 6: /* add: fn + fm */
3053 case 7: /* sub: fn - fm */
3056 case 8: /* div: fn / fm */
3059 case 14: /* fconst */
3060 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3063 n
= (insn
<< 12) & 0x80000000;
3064 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3071 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3078 tcg_gen_movi_i32(cpu_F0s
, n
);
3081 case 15: /* extension space */
3104 case 11: /* cmpez */
3108 case 15: /* single<->double conversion */
3110 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3112 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3114 case 16: /* fuito */
3117 case 17: /* fsito */
3120 case 20: /* fshto */
3121 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3123 gen_vfp_shto(dp
, rm
);
3125 case 21: /* fslto */
3126 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3128 gen_vfp_slto(dp
, rm
);
3130 case 22: /* fuhto */
3131 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3133 gen_vfp_uhto(dp
, rm
);
3135 case 23: /* fulto */
3136 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3138 gen_vfp_ulto(dp
, rm
);
3140 case 24: /* ftoui */
3143 case 25: /* ftouiz */
3146 case 26: /* ftosi */
3149 case 27: /* ftosiz */
3152 case 28: /* ftosh */
3153 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3155 gen_vfp_tosh(dp
, rm
);
3157 case 29: /* ftosl */
3158 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3160 gen_vfp_tosl(dp
, rm
);
3162 case 30: /* ftouh */
3163 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3165 gen_vfp_touh(dp
, rm
);
3167 case 31: /* ftoul */
3168 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3170 gen_vfp_toul(dp
, rm
);
3172 default: /* undefined */
3173 printf ("rn:%d\n", rn
);
3177 default: /* undefined */
3178 printf ("op:%d\n", op
);
3182 /* Write back the result. */
3183 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3184 ; /* Comparison, do nothing. */
3185 else if (op
== 15 && rn
> 17)
3186 /* Integer result. */
3187 gen_mov_vreg_F0(0, rd
);
3188 else if (op
== 15 && rn
== 15)
3190 gen_mov_vreg_F0(!dp
, rd
);
3192 gen_mov_vreg_F0(dp
, rd
);
3194 /* break out of the loop if we have finished */
3198 if (op
== 15 && delta_m
== 0) {
3199 /* single source one-many */
3201 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3203 gen_mov_vreg_F0(dp
, rd
);
3207 /* Setup the next operands. */
3209 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3213 /* One source operand. */
3214 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3216 gen_mov_F0_vreg(dp
, rm
);
3218 /* Two source operands. */
3219 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3221 gen_mov_F0_vreg(dp
, rn
);
3223 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3225 gen_mov_F1_vreg(dp
, rm
);
3233 if (dp
&& (insn
& 0x03e00000) == 0x00400000) {
3234 /* two-register transfer */
3235 rn
= (insn
>> 16) & 0xf;
3236 rd
= (insn
>> 12) & 0xf;
3238 VFP_DREG_M(rm
, insn
);
3240 rm
= VFP_SREG_M(insn
);
3243 if (insn
& ARM_CP_RW_BIT
) {
3246 gen_mov_F0_vreg(0, rm
* 2);
3247 tmp
= gen_vfp_mrs();
3248 store_reg(s
, rd
, tmp
);
3249 gen_mov_F0_vreg(0, rm
* 2 + 1);
3250 tmp
= gen_vfp_mrs();
3251 store_reg(s
, rn
, tmp
);
3253 gen_mov_F0_vreg(0, rm
);
3254 tmp
= gen_vfp_mrs();
3255 store_reg(s
, rn
, tmp
);
3256 gen_mov_F0_vreg(0, rm
+ 1);
3257 tmp
= gen_vfp_mrs();
3258 store_reg(s
, rd
, tmp
);
3263 tmp
= load_reg(s
, rd
);
3265 gen_mov_vreg_F0(0, rm
* 2);
3266 tmp
= load_reg(s
, rn
);
3268 gen_mov_vreg_F0(0, rm
* 2 + 1);
3270 tmp
= load_reg(s
, rn
);
3272 gen_mov_vreg_F0(0, rm
);
3273 tmp
= load_reg(s
, rd
);
3275 gen_mov_vreg_F0(0, rm
+ 1);
3280 rn
= (insn
>> 16) & 0xf;
3282 VFP_DREG_D(rd
, insn
);
3284 rd
= VFP_SREG_D(insn
);
3285 if (s
->thumb
&& rn
== 15) {
3286 gen_op_movl_T1_im(s
->pc
& ~2);
3288 gen_movl_T1_reg(s
, rn
);
3290 if ((insn
& 0x01200000) == 0x01000000) {
3291 /* Single load/store */
3292 offset
= (insn
& 0xff) << 2;
3293 if ((insn
& (1 << 23)) == 0)
3295 gen_op_addl_T1_im(offset
);
3296 if (insn
& (1 << 20)) {
3298 gen_mov_vreg_F0(dp
, rd
);
3300 gen_mov_F0_vreg(dp
, rd
);
3304 /* load/store multiple */
3306 n
= (insn
>> 1) & 0x7f;
3310 if (insn
& (1 << 24)) /* pre-decrement */
3311 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
3317 for (i
= 0; i
< n
; i
++) {
3318 if (insn
& ARM_CP_RW_BIT
) {
3321 gen_mov_vreg_F0(dp
, rd
+ i
);
3324 gen_mov_F0_vreg(dp
, rd
+ i
);
3327 gen_op_addl_T1_im(offset
);
3329 if (insn
& (1 << 21)) {
3331 if (insn
& (1 << 24))
3332 offset
= -offset
* n
;
3333 else if (dp
&& (insn
& 1))
3339 gen_op_addl_T1_im(offset
);
3340 gen_movl_reg_T1(s
, rn
);
3346 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
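/* Illustrative note (not part of the original source): direct TB chaining is
   only used when the branch target lies on the same guest page as the
   current TB, e.g. a TB at 0x8000 may chain straight to 0x8040, while a jump
   to 0x00020000 crosses TARGET_PAGE_MASK and instead exits with
   tcg_gen_exit_tb(0) so the target is looked up (and its permissions
   rechecked) at run time.  */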
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
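/* Illustrative example (not part of the original source): gen_mulxy
   implements the 16x16->32 signed multiply used by SMULxy/SMLAxy, where x
   and y pick the top or bottom half of each operand.  With
   t0 = 0xffff0002 (top = -1, bottom = 2) and t1 = 0x00030004 (bottom = 4):

       x = 1, y = 0:  (-1) * 4 = -4
       x = 0, y = 0:    2  * 4 =  8                                         */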
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
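/* Illustrative example (not part of the original source): the four flag bits
   come from the MSR field mask <c,x,s,f>, one byte lane each.  An
   "MSR CPSR_fc, rN" has flags = 0b1001, so before the feature and privilege
   filtering above the mask is 0xff000000 | 0x000000ff = 0xff0000ff.  */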
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
        tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(cpu_T[0], mask);
    }
    gen_lookup_tb(s);
    return 0;
}
/* Generate an old-style exception return.  */
static void gen_exception_return(DisasContext *s)
{
    TCGv tmp;
    gen_movl_reg_T0(s, 15);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(CPU_T001); break;
    case 1: gen_helper_neon_add_u16(CPU_T001); break;
    case 2: gen_op_addl_T0_T1(); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 2: gen_op_rsbl_T0_T1(); break;
    default: return;
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 1: gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 2: gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 3: gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 4: gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 5: gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 1: gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 2: gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 3: gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 4: gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 5: gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    default: return 1; \
    }} while (0)
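/* Illustrative note (not part of the original source): "size" and "u" are
   picked up from the enclosing scope, so for an unsigned halfword op
   (size = 1, u = 1) the selector (size << 1) | u is 3 and, e.g.,

       GEN_NEON_INTEGER_OP(hadd);

   expands to gen_helper_neon_hadd_u16(cpu_T[0], cpu_T[0], cpu_T[1]).  */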
static inline void
gen_neon_movl_scratch_T0(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
}

static inline void
gen_neon_movl_scratch_T1(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
}

static inline void
gen_neon_movl_T0_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
}

static inline void
gen_neon_movl_T1_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
}
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1) {
            gen_neon_dup_low16(cpu_T[0]);
        } else {
            gen_neon_dup_high16(cpu_T[0]);
        }
    }
}
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;

    for (n = 0; n < q + 1; n += 2) {
        NEON_GET_REG(T0, reg, n);
        NEON_GET_REG(T1, reg, n + 1);
        switch (size) {
        case 0: gen_helper_neon_unzip_u8(); break;
        case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        gen_neon_movl_scratch_T0(tmp + n);
        gen_neon_movl_scratch_T1(tmp + n + 1);
    }
}

static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
3679 if (!vfp_enabled(env
))
3681 VFP_DREG_D(rd
, insn
);
3682 rn
= (insn
>> 16) & 0xf;
3684 load
= (insn
& (1 << 21)) != 0;
3685 if ((insn
& (1 << 23)) == 0) {
3686 /* Load store all elements. */
3687 op
= (insn
>> 8) & 0xf;
3688 size
= (insn
>> 6) & 3;
3689 if (op
> 10 || size
== 3)
3691 nregs
= neon_ls_element_type
[op
].nregs
;
3692 interleave
= neon_ls_element_type
[op
].interleave
;
3693 gen_movl_T1_reg(s
, rn
);
3694 stride
= (1 << size
) * interleave
;
3695 for (reg
= 0; reg
< nregs
; reg
++) {
3696 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3697 gen_movl_T1_reg(s
, rn
);
3698 gen_op_addl_T1_im((1 << size
) * reg
);
3699 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3700 gen_movl_T1_reg(s
, rn
);
3701 gen_op_addl_T1_im(1 << size
);
3703 for (pass
= 0; pass
< 2; pass
++) {
3706 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
3707 neon_store_reg(rd
, pass
, tmp
);
3709 tmp
= neon_load_reg(rd
, pass
);
3710 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
3712 gen_op_addl_T1_im(stride
);
3713 } else if (size
== 1) {
3715 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3716 gen_op_addl_T1_im(stride
);
3717 tmp2
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3718 gen_op_addl_T1_im(stride
);
3719 gen_bfi(tmp
, tmp
, tmp2
, 16, 0xffff);
3721 neon_store_reg(rd
, pass
, tmp
);
3723 tmp
= neon_load_reg(rd
, pass
);
3725 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3726 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
3727 gen_op_addl_T1_im(stride
);
3728 gen_st16(tmp2
, cpu_T
[1], IS_USER(s
));
3729 gen_op_addl_T1_im(stride
);
3731 } else /* size == 0 */ {
3734 for (n
= 0; n
< 4; n
++) {
3735 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3736 gen_op_addl_T1_im(stride
);
3740 gen_bfi(tmp2
, tmp2
, tmp
, n
* 8, 0xff);
3744 neon_store_reg(rd
, pass
, tmp2
);
3746 tmp2
= neon_load_reg(rd
, pass
);
3747 for (n
= 0; n
< 4; n
++) {
3750 tcg_gen_mov_i32(tmp
, tmp2
);
3752 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3754 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
3755 gen_op_addl_T1_im(stride
);
3761 rd
+= neon_ls_element_type
[op
].spacing
;
3765 size
= (insn
>> 10) & 3;
3767 /* Load single element to all lanes. */
3770 size
= (insn
>> 6) & 3;
3771 nregs
= ((insn
>> 8) & 3) + 1;
3772 stride
= (insn
& (1 << 5)) ? 2 : 1;
3773 gen_movl_T1_reg(s
, rn
);
3774 for (reg
= 0; reg
< nregs
; reg
++) {
3777 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3778 gen_neon_dup_u8(tmp
, 0);
3781 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3782 gen_neon_dup_low16(tmp
);
3785 tmp
= gen_ld32(cpu_T
[0], IS_USER(s
));
3789 default: /* Avoid compiler warnings. */
3792 gen_op_addl_T1_im(1 << size
);
3794 tcg_gen_mov_i32(tmp2
, tmp
);
3795 neon_store_reg(rd
, 0, tmp2
);
3796 neon_store_reg(rd
, 1, tmp
);
3799 stride
= (1 << size
) * nregs
;
3801 /* Single element. */
3802 pass
= (insn
>> 7) & 1;
3805 shift
= ((insn
>> 5) & 3) * 8;
3809 shift
= ((insn
>> 6) & 1) * 16;
3810 stride
= (insn
& (1 << 5)) ? 2 : 1;
3814 stride
= (insn
& (1 << 6)) ? 2 : 1;
3819 nregs
= ((insn
>> 8) & 3) + 1;
3820 gen_movl_T1_reg(s
, rn
);
3821 for (reg
= 0; reg
< nregs
; reg
++) {
3825 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3828 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3831 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
3833 default: /* Avoid compiler warnings. */
3837 tmp2
= neon_load_reg(rd
, pass
);
3838 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3841 neon_store_reg(rd
, pass
, tmp
);
3842 } else { /* Store */
3843 tmp
= neon_load_reg(rd
, pass
);
3845 tcg_gen_shri_i32(tmp
, tmp
, shift
);
3848 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
3851 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
3854 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
3859 gen_op_addl_T1_im(1 << size
);
3861 stride
= nregs
* (1 << size
);
3867 base
= load_reg(s
, rn
);
3869 tcg_gen_addi_i32(base
, base
, stride
);
3872 index
= load_reg(s
, rm
);
3873 tcg_gen_add_i32(base
, base
, index
);
3876 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_bic_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
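/* Illustrative example (not part of the original source): with
       c = 0xff00ff00, t = 0x11223344, f = 0xaabbccdd
   the select keeps t's bits where c is 1 and f's bits elsewhere:
       dest = (t & c) | (f & ~c) = 0x11bb33dd
   which is the per-bit behaviour of VBSL.  */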
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
}
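/* Illustrative note (not part of the original source): the (size << 1) | u
   selector mirrors the NEON element-size/unsigned encoding, e.g. a VMULL.U16
   (size = 1, u = 1) takes case 3 and calls gen_helper_neon_mull_u16(), while
   the 32-bit element cases fall through to the host 32x32->64 multiply
   helpers (gen_muls_i64_i32 / gen_mulu_i64_i32) directly.  */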
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
4060 if (!vfp_enabled(env
))
4062 q
= (insn
& (1 << 6)) != 0;
4063 u
= (insn
>> 24) & 1;
4064 VFP_DREG_D(rd
, insn
);
4065 VFP_DREG_N(rn
, insn
);
4066 VFP_DREG_M(rm
, insn
);
4067 size
= (insn
>> 20) & 3;
4068 if ((insn
& (1 << 23)) == 0) {
4069 /* Three register same length. */
4070 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4071 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4072 || op
== 10 || op
== 11 || op
== 16)) {
4073 /* 64-bit element instructions. */
4074 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4075 neon_load_reg64(cpu_V0
, rn
+ pass
);
4076 neon_load_reg64(cpu_V1
, rm
+ pass
);
4080 gen_helper_neon_add_saturate_u64(CPU_V001
);
4082 gen_helper_neon_add_saturate_s64(CPU_V001
);
4087 gen_helper_neon_sub_saturate_u64(CPU_V001
);
4089 gen_helper_neon_sub_saturate_s64(CPU_V001
);
4094 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4096 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4101 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4104 gen_helper_neon_qshl_s64(cpu_V1
, cpu_env
,
4108 case 10: /* VRSHL */
4110 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4112 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4115 case 11: /* VQRSHL */
4117 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4120 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4126 tcg_gen_sub_i64(CPU_V001
);
4128 tcg_gen_add_i64(CPU_V001
);
4134 neon_store_reg64(cpu_V0
, rd
+ pass
);
4141 case 10: /* VRSHL */
4142 case 11: /* VQRSHL */
4145 /* Shift instruction operands are reversed. */
4152 case 20: /* VPMAX */
4153 case 21: /* VPMIN */
4154 case 23: /* VPADD */
4157 case 26: /* VPADD (float) */
4158 pairwise
= (u
&& size
< 2);
4160 case 30: /* VPMIN/VPMAX (float) */
4167 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4176 NEON_GET_REG(T0
, rn
, n
);
4177 NEON_GET_REG(T1
, rn
, n
+ 1);
4179 NEON_GET_REG(T0
, rm
, n
);
4180 NEON_GET_REG(T1
, rm
, n
+ 1);
4184 NEON_GET_REG(T0
, rn
, pass
);
4185 NEON_GET_REG(T1
, rm
, pass
);
4189 GEN_NEON_INTEGER_OP(hadd
);
4192 GEN_NEON_INTEGER_OP_ENV(qadd
);
4194 case 2: /* VRHADD */
4195 GEN_NEON_INTEGER_OP(rhadd
);
4197 case 3: /* Logic ops. */
4198 switch ((u
<< 2) | size
) {
4200 gen_op_andl_T0_T1();
4203 gen_op_bicl_T0_T1();
4213 gen_op_xorl_T0_T1();
4216 tmp
= neon_load_reg(rd
, pass
);
4217 gen_neon_bsl(cpu_T
[0], cpu_T
[0], cpu_T
[1], tmp
);
4221 tmp
= neon_load_reg(rd
, pass
);
4222 gen_neon_bsl(cpu_T
[0], cpu_T
[0], tmp
, cpu_T
[1]);
4226 tmp
= neon_load_reg(rd
, pass
);
4227 gen_neon_bsl(cpu_T
[0], tmp
, cpu_T
[0], cpu_T
[1]);
4233 GEN_NEON_INTEGER_OP(hsub
);
4236 GEN_NEON_INTEGER_OP_ENV(qsub
);
4239 GEN_NEON_INTEGER_OP(cgt
);
4242 GEN_NEON_INTEGER_OP(cge
);
4245 GEN_NEON_INTEGER_OP(shl
);
4248 GEN_NEON_INTEGER_OP_ENV(qshl
);
4250 case 10: /* VRSHL */
4251 GEN_NEON_INTEGER_OP(rshl
);
4253 case 11: /* VQRSHL */
4254 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4257 GEN_NEON_INTEGER_OP(max
);
4260 GEN_NEON_INTEGER_OP(min
);
4263 GEN_NEON_INTEGER_OP(abd
);
4266 GEN_NEON_INTEGER_OP(abd
);
4267 NEON_GET_REG(T1
, rd
, pass
);
4271 if (!u
) { /* VADD */
4272 if (gen_neon_add(size
))
4276 case 0: gen_helper_neon_sub_u8(CPU_T001
); break;
4277 case 1: gen_helper_neon_sub_u16(CPU_T001
); break;
4278 case 2: gen_op_subl_T0_T1(); break;
4284 if (!u
) { /* VTST */
4286 case 0: gen_helper_neon_tst_u8(CPU_T001
); break;
4287 case 1: gen_helper_neon_tst_u16(CPU_T001
); break;
4288 case 2: gen_helper_neon_tst_u32(CPU_T001
); break;
4293 case 0: gen_helper_neon_ceq_u8(CPU_T001
); break;
4294 case 1: gen_helper_neon_ceq_u16(CPU_T001
); break;
4295 case 2: gen_helper_neon_ceq_u32(CPU_T001
); break;
4300 case 18: /* Multiply. */
4302 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
4303 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
4304 case 2: gen_op_mul_T0_T1(); break;
4307 NEON_GET_REG(T1
, rd
, pass
);
4315 if (u
) { /* polynomial */
4316 gen_helper_neon_mul_p8(CPU_T001
);
4317 } else { /* Integer */
4319 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
4320 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
4321 case 2: gen_op_mul_T0_T1(); break;
4326 case 20: /* VPMAX */
4327 GEN_NEON_INTEGER_OP(pmax
);
4329 case 21: /* VPMIN */
4330 GEN_NEON_INTEGER_OP(pmin
);
            case 22: /* Multiply high.  */
4333 if (!u
) { /* VQDMULH */
4335 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01
); break;
4336 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01
); break;
                } else { /* VQRDMULH */
4341 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01
); break;
4342 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01
); break;
4347 case 23: /* VPADD */
4351 case 0: gen_helper_neon_padd_u8(CPU_T001
); break;
4352 case 1: gen_helper_neon_padd_u16(CPU_T001
); break;
4353 case 2: gen_op_addl_T0_T1(); break;
            case 26: /* Floating point arithmetic.  */
4358 switch ((u
<< 2) | size
) {
4360 gen_helper_neon_add_f32(CPU_T001
);
4363 gen_helper_neon_sub_f32(CPU_T001
);
4366 gen_helper_neon_add_f32(CPU_T001
);
4369 gen_helper_neon_abd_f32(CPU_T001
);
4375 case 27: /* Float multiply. */
4376 gen_helper_neon_mul_f32(CPU_T001
);
4378 NEON_GET_REG(T1
, rd
, pass
);
4380 gen_helper_neon_add_f32(CPU_T001
);
4382 gen_helper_neon_sub_f32(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
4386 case 28: /* Float compare. */
4388 gen_helper_neon_ceq_f32(CPU_T001
);
4391 gen_helper_neon_cge_f32(CPU_T001
);
4393 gen_helper_neon_cgt_f32(CPU_T001
);
4396 case 29: /* Float compare absolute. */
4400 gen_helper_neon_acge_f32(CPU_T001
);
4402 gen_helper_neon_acgt_f32(CPU_T001
);
4404 case 30: /* Float min/max. */
4406 gen_helper_neon_max_f32(CPU_T001
);
4408 gen_helper_neon_min_f32(CPU_T001
);
4412 gen_helper_recps_f32(cpu_T
[0], cpu_T
[0], cpu_T
[1], cpu_env
);
4414 gen_helper_rsqrts_f32(cpu_T
[0], cpu_T
[0], cpu_T
[1], cpu_env
);
4419 /* Save the result. For elementwise operations we can put it
4420 straight into the destination register. For pairwise operations
4421 we have to be careful to avoid clobbering the source operands. */
4422 if (pairwise
&& rd
== rm
) {
4423 gen_neon_movl_scratch_T0(pass
);
4425 NEON_SET_REG(T0
, rd
, pass
);
4429 if (pairwise
&& rd
== rm
) {
4430 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4431 gen_neon_movl_T0_scratch(pass
);
4432 NEON_SET_REG(T0
, rd
, pass
);
4435 /* End of 3 register same size operations. */
4436 } else if (insn
& (1 << 4)) {
4437 if ((insn
& 0x00380080) != 0) {
4438 /* Two registers and shift. */
4439 op
= (insn
>> 8) & 0xf;
4440 if (insn
& (1 << 7)) {
4445 while ((insn
& (1 << (size
+ 19))) == 0)
4448 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
                /* To avoid excessive duplication of ops we implement shift
                   by immediate using the variable shift operations.  */
4452 /* Shift by immediate:
4453 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4454 /* Right shifts are encoded as N - shift, where N is the
4455 element size in bits. */
4457 shift
= shift
- (1 << (size
+ 3));
4465 imm
= (uint8_t) shift
;
4470 imm
= (uint16_t) shift
;
4481 for (pass
= 0; pass
< count
; pass
++) {
4483 neon_load_reg64(cpu_V0
, rm
+ pass
);
4484 tcg_gen_movi_i64(cpu_V1
, imm
);
4489 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4491 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4496 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4498 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4503 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4505 case 5: /* VSHL, VSLI */
4506 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4510 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4512 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4514 case 7: /* VQSHLU */
4515 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4518 if (op
== 1 || op
== 3) {
4520 neon_load_reg64(cpu_V0
, rd
+ pass
);
4521 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4522 } else if (op
== 4 || (op
== 5 && u
)) {
4524 cpu_abort(env
, "VS[LR]I.64 not implemented");
4526 neon_store_reg64(cpu_V0
, rd
+ pass
);
4527 } else { /* size < 3 */
4528 /* Operands in T0 and T1. */
4529 gen_op_movl_T1_im(imm
);
4530 NEON_GET_REG(T0
, rm
, pass
);
4534 GEN_NEON_INTEGER_OP(shl
);
4538 GEN_NEON_INTEGER_OP(rshl
);
4543 GEN_NEON_INTEGER_OP(shl
);
4545 case 5: /* VSHL, VSLI */
4547 case 0: gen_helper_neon_shl_u8(CPU_T001
); break;
4548 case 1: gen_helper_neon_shl_u16(CPU_T001
); break;
4549 case 2: gen_helper_neon_shl_u32(CPU_T001
); break;
4554 GEN_NEON_INTEGER_OP_ENV(qshl
);
4556 case 7: /* VQSHLU */
4558 case 0: gen_helper_neon_qshl_u8(CPU_T0E01
); break;
4559 case 1: gen_helper_neon_qshl_u16(CPU_T0E01
); break;
4560 case 2: gen_helper_neon_qshl_u32(CPU_T0E01
); break;
4566 if (op
== 1 || op
== 3) {
4568 NEON_GET_REG(T1
, rd
, pass
);
4570 } else if (op
== 4 || (op
== 5 && u
)) {
4575 imm
= 0xff >> -shift
;
4577 imm
= (uint8_t)(0xff << shift
);
4583 imm
= 0xffff >> -shift
;
4585 imm
= (uint16_t)(0xffff << shift
);
4590 imm
= 0xffffffffu
>> -shift
;
4592 imm
= 0xffffffffu
<< shift
;
4597 tmp
= neon_load_reg(rd
, pass
);
4598 tcg_gen_andi_i32(cpu_T
[0], cpu_T
[0], imm
);
4599 tcg_gen_andi_i32(tmp
, tmp
, ~imm
);
4600 tcg_gen_or_i32(cpu_T
[0], cpu_T
[0], tmp
);
4602 NEON_SET_REG(T0
, rd
, pass
);
4605 } else if (op
< 10) {
4606 /* Shift by immediate and narrow:
4607 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4608 shift
= shift
- (1 << (size
+ 3));
4612 imm
= (uint16_t)shift
;
4614 tmp2
= tcg_const_i32(imm
);
4615 TCGV_UNUSED_I64(tmp64
);
4618 imm
= (uint32_t)shift
;
4619 tmp2
= tcg_const_i32(imm
);
4620 TCGV_UNUSED_I64(tmp64
);
4623 tmp64
= tcg_const_i64(shift
);
4630 for (pass
= 0; pass
< 2; pass
++) {
4632 neon_load_reg64(cpu_V0
, rm
+ pass
);
4635 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, tmp64
);
4637 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, tmp64
);
4640 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, tmp64
);
4642 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, tmp64
);
4645 tmp
= neon_load_reg(rm
+ pass
, 0);
4646 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
, u
);
4647 tmp3
= neon_load_reg(rm
+ pass
, 1);
4648 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
, u
);
4649 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4654 if (op
== 8 && !u
) {
4655 gen_neon_narrow(size
- 1, tmp
, cpu_V0
);
4658 gen_neon_narrow_sats(size
- 1, tmp
, cpu_V0
);
4660 gen_neon_narrow_satu(size
- 1, tmp
, cpu_V0
);
4665 neon_store_reg(rd
, 0, tmp2
);
4666 neon_store_reg(rd
, 1, tmp
);
4669 } else if (op
== 10) {
4673 tmp
= neon_load_reg(rm
, 0);
4674 tmp2
= neon_load_reg(rm
, 1);
4675 for (pass
= 0; pass
< 2; pass
++) {
4679 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4682 /* The shift is less than the width of the source
4683 type, so we can just shift the whole register. */
4684 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
4685 if (size
< 2 || !u
) {
4688 imm
= (0xffu
>> (8 - shift
));
4691 imm
= 0xffff >> (16 - shift
);
4693 imm64
= imm
| (((uint64_t)imm
) << 32);
4694 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, imm64
);
4697 neon_store_reg64(cpu_V0
, rd
+ pass
);
4699 } else if (op
== 15 || op
== 16) {
4700 /* VCVT fixed-point. */
4701 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4702 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4705 gen_vfp_ulto(0, shift
);
4707 gen_vfp_slto(0, shift
);
4710 gen_vfp_toul(0, shift
);
4712 gen_vfp_tosl(0, shift
);
4714 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4719 } else { /* (insn & 0x00380080) == 0 */
4722 op
= (insn
>> 8) & 0xf;
4723 /* One register and immediate. */
4724 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4725 invert
= (insn
& (1 << 5)) != 0;
4743 imm
= (imm
<< 8) | (imm
<< 24);
4746 imm
= (imm
< 8) | 0xff;
4749 imm
= (imm
<< 16) | 0xffff;
4752 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
4757 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
4758 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
4764 if (op
!= 14 || !invert
)
4765 gen_op_movl_T1_im(imm
);
4767 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4768 if (op
& 1 && op
< 12) {
4769 tmp
= neon_load_reg(rd
, pass
);
4771 /* The immediate value has already been inverted, so
4773 tcg_gen_andi_i32(tmp
, tmp
, imm
);
4775 tcg_gen_ori_i32(tmp
, tmp
, imm
);
4780 if (op
== 14 && invert
) {
4783 for (n
= 0; n
< 4; n
++) {
4784 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
4785 val
|= 0xff << (n
* 8);
4787 tcg_gen_movi_i32(tmp
, val
);
4789 tcg_gen_movi_i32(tmp
, imm
);
4792 neon_store_reg(rd
, pass
, tmp
);
4795 } else { /* (insn & 0x00800010 == 0x00800000) */
4797 op
= (insn
>> 8) & 0xf;
4798 if ((insn
& (1 << 6)) == 0) {
4799 /* Three registers of different lengths. */
4803 /* prewiden, src1_wide, src2_wide */
4804 static const int neon_3reg_wide
[16][3] = {
4805 {1, 0, 0}, /* VADDL */
4806 {1, 1, 0}, /* VADDW */
4807 {1, 0, 0}, /* VSUBL */
4808 {1, 1, 0}, /* VSUBW */
4809 {0, 1, 1}, /* VADDHN */
4810 {0, 0, 0}, /* VABAL */
4811 {0, 1, 1}, /* VSUBHN */
4812 {0, 0, 0}, /* VABDL */
4813 {0, 0, 0}, /* VMLAL */
4814 {0, 0, 0}, /* VQDMLAL */
4815 {0, 0, 0}, /* VMLSL */
4816 {0, 0, 0}, /* VQDMLSL */
4817 {0, 0, 0}, /* Integer VMULL */
4818 {0, 0, 0}, /* VQDMULL */
4819 {0, 0, 0} /* Polynomial VMULL */
4822 prewiden
= neon_3reg_wide
[op
][0];
4823 src1_wide
= neon_3reg_wide
[op
][1];
4824 src2_wide
= neon_3reg_wide
[op
][2];
4826 if (size
== 0 && (op
== 9 || op
== 11 || op
== 13))
4829 /* Avoid overlapping operands. Wide source operands are
4830 always aligned so will never overlap with wide
4831 destinations in problematic ways. */
4832 if (rd
== rm
&& !src2_wide
) {
4833 NEON_GET_REG(T0
, rm
, 1);
4834 gen_neon_movl_scratch_T0(2);
4835 } else if (rd
== rn
&& !src1_wide
) {
4836 NEON_GET_REG(T0
, rn
, 1);
4837 gen_neon_movl_scratch_T0(2);
4840 for (pass
= 0; pass
< 2; pass
++) {
4842 neon_load_reg64(cpu_V0
, rn
+ pass
);
4845 if (pass
== 1 && rd
== rn
) {
4846 gen_neon_movl_T0_scratch(2);
4848 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
4850 tmp
= neon_load_reg(rn
, pass
);
4853 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4857 neon_load_reg64(cpu_V1
, rm
+ pass
);
4860 if (pass
== 1 && rd
== rm
) {
4861 gen_neon_movl_T0_scratch(2);
4863 tcg_gen_mov_i32(tmp2
, cpu_T
[0]);
4865 tmp2
= neon_load_reg(rm
, pass
);
4868 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
4872 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4873 gen_neon_addl(size
);
                case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4876 gen_neon_subl(size
);
4878 case 5: case 7: /* VABAL, VABDL */
4879 switch ((size
<< 1) | u
) {
4881 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
4884 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
4887 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
4890 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
4893 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
4896 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
4903 case 8: case 9: case 10: case 11: case 12: case 13:
4904 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4905 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
4907 case 14: /* Polynomial VMULL */
4908 cpu_abort(env
, "Polynomial VMULL not implemented");
4910 default: /* 15 is RESERVED. */
4913 if (op
== 5 || op
== 13 || (op
>= 8 && op
<= 11)) {
4915 if (op
== 10 || op
== 11) {
4916 gen_neon_negl(cpu_V0
, size
);
4920 neon_load_reg64(cpu_V1
, rd
+ pass
);
4924 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4925 gen_neon_addl(size
);
4927 case 9: case 11: /* VQDMLAL, VQDMLSL */
4928 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
4929 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
4932 case 13: /* VQDMULL */
4933 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
4938 neon_store_reg64(cpu_V0
, rd
+ pass
);
4939 } else if (op
== 4 || op
== 6) {
4940 /* Narrowing operation. */
4945 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
4948 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
4951 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
4952 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
4959 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
4962 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
4965 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
4966 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
4967 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
4975 neon_store_reg(rd
, 0, tmp3
);
4976 neon_store_reg(rd
, 1, tmp
);
4979 /* Write back the result. */
4980 neon_store_reg64(cpu_V0
, rd
+ pass
);
4984 /* Two registers and a scalar. */
4986 case 0: /* Integer VMLA scalar */
4987 case 1: /* Float VMLA scalar */
4988 case 4: /* Integer VMLS scalar */
4989 case 5: /* Floating point VMLS scalar */
4990 case 8: /* Integer VMUL scalar */
4991 case 9: /* Floating point VMUL scalar */
4992 case 12: /* VQDMULH scalar */
4993 case 13: /* VQRDMULH scalar */
4994 gen_neon_get_scalar(size
, rm
);
4995 gen_neon_movl_scratch_T0(0);
4996 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
4998 gen_neon_movl_T0_scratch(0);
4999 NEON_GET_REG(T1
, rn
, pass
);
5002 gen_helper_neon_qdmulh_s16(CPU_T0E01
);
5004 gen_helper_neon_qdmulh_s32(CPU_T0E01
);
5006 } else if (op
== 13) {
5008 gen_helper_neon_qrdmulh_s16(CPU_T0E01
);
5010 gen_helper_neon_qrdmulh_s32(CPU_T0E01
);
5012 } else if (op
& 1) {
5013 gen_helper_neon_mul_f32(CPU_T001
);
5016 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
5017 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
5018 case 2: gen_op_mul_T0_T1(); break;
5024 NEON_GET_REG(T1
, rd
, pass
);
5030 gen_helper_neon_add_f32(CPU_T001
);
5036 gen_helper_neon_sub_f32(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
5042 NEON_SET_REG(T0
, rd
, pass
);
            case 2: /* VMLAL scalar */
5046 case 3: /* VQDMLAL scalar */
5047 case 6: /* VMLSL scalar */
5048 case 7: /* VQDMLSL scalar */
5049 case 10: /* VMULL scalar */
5050 case 11: /* VQDMULL scalar */
5051 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5054 gen_neon_get_scalar(size
, rm
);
5055 NEON_GET_REG(T1
, rn
, 1);
5057 for (pass
= 0; pass
< 2; pass
++) {
5059 tmp
= neon_load_reg(rn
, 0);
5062 tcg_gen_mov_i32(tmp
, cpu_T
[1]);
5065 tcg_gen_mov_i32(tmp2
, cpu_T
[0]);
5066 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5067 if (op
== 6 || op
== 7) {
5068 gen_neon_negl(cpu_V0
, size
);
5071 neon_load_reg64(cpu_V1
, rd
+ pass
);
5075 gen_neon_addl(size
);
5078 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5079 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5085 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5090 neon_store_reg64(cpu_V0
, rd
+ pass
);
5093 default: /* 14 and 15 are RESERVED */
5097 } else { /* size == 3 */
5100 imm
= (insn
>> 8) & 0xf;
5107 neon_load_reg64(cpu_V0
, rn
);
5109 neon_load_reg64(cpu_V1
, rn
+ 1);
5111 } else if (imm
== 8) {
5112 neon_load_reg64(cpu_V0
, rn
+ 1);
5114 neon_load_reg64(cpu_V1
, rm
);
5117 tmp64
= tcg_temp_new_i64();
5119 neon_load_reg64(cpu_V0
, rn
);
5120 neon_load_reg64(tmp64
, rn
+ 1);
5122 neon_load_reg64(cpu_V0
, rn
+ 1);
5123 neon_load_reg64(tmp64
, rm
);
5125 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5126 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5127 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5129 neon_load_reg64(cpu_V1
, rm
);
5131 neon_load_reg64(cpu_V1
, rm
+ 1);
5134 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5135 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5136 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5139 neon_load_reg64(cpu_V0
, rn
);
5140 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5141 neon_load_reg64(cpu_V1
, rm
);
5142 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5143 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5145 neon_store_reg64(cpu_V0
, rd
);
5147 neon_store_reg64(cpu_V1
, rd
+ 1);
5149 } else if ((insn
& (1 << 11)) == 0) {
5150 /* Two register misc. */
5151 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5152 size
= (insn
>> 18) & 3;
5154 case 0: /* VREV64 */
5157 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5158 NEON_GET_REG(T0
, rm
, pass
* 2);
5159 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
5161 case 0: tcg_gen_bswap_i32(cpu_T
[0], cpu_T
[0]); break;
5162 case 1: gen_swap_half(cpu_T
[0]); break;
5163 case 2: /* no-op */ break;
5166 NEON_SET_REG(T0
, rd
, pass
* 2 + 1);
5168 NEON_SET_REG(T1
, rd
, pass
* 2);
5170 gen_op_movl_T0_T1();
5172 case 0: tcg_gen_bswap_i32(cpu_T
[0], cpu_T
[0]); break;
5173 case 1: gen_swap_half(cpu_T
[0]); break;
5176 NEON_SET_REG(T0
, rd
, pass
* 2);
5180 case 4: case 5: /* VPADDL */
5181 case 12: case 13: /* VPADAL */
5184 for (pass
= 0; pass
< q
+ 1; pass
++) {
5185 tmp
= neon_load_reg(rm
, pass
* 2);
5186 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5187 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5188 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5190 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5191 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5192 case 2: tcg_gen_add_i64(CPU_V001
); break;
5197 neon_load_reg64(cpu_V1
, rd
+ pass
);
5198 gen_neon_addl(size
);
5200 neon_store_reg64(cpu_V0
, rd
+ pass
);
5205 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5206 NEON_GET_REG(T0
, rm
, n
);
5207 NEON_GET_REG(T1
, rd
, n
+ 1);
5208 NEON_SET_REG(T1
, rm
, n
);
5209 NEON_SET_REG(T0
, rd
, n
+ 1);
5217 Rd A3 A2 A1 A0 B2 B0 A2 A0
5218 Rm B3 B2 B1 B0 B3 B1 A3 A1
5222 gen_neon_unzip(rd
, q
, 0, size
);
5223 gen_neon_unzip(rm
, q
, 4, size
);
5225 static int unzip_order_q
[8] =
5226 {0, 2, 4, 6, 1, 3, 5, 7};
5227 for (n
= 0; n
< 8; n
++) {
5228 int reg
= (n
< 4) ? rd
: rm
;
5229 gen_neon_movl_T0_scratch(unzip_order_q
[n
]);
5230 NEON_SET_REG(T0
, reg
, n
% 4);
5233 static int unzip_order
[4] =
5235 for (n
= 0; n
< 4; n
++) {
5236 int reg
= (n
< 2) ? rd
: rm
;
5237 gen_neon_movl_T0_scratch(unzip_order
[n
]);
5238 NEON_SET_REG(T0
, reg
, n
% 2);
5244 Rd A3 A2 A1 A0 B1 A1 B0 A0
5245 Rm B3 B2 B1 B0 B3 A3 B2 A2
5249 count
= (q
? 4 : 2);
5250 for (n
= 0; n
< count
; n
++) {
5251 NEON_GET_REG(T0
, rd
, n
);
5252 NEON_GET_REG(T1
, rd
, n
);
5254 case 0: gen_helper_neon_zip_u8(); break;
5255 case 1: gen_helper_neon_zip_u16(); break;
5256 case 2: /* no-op */; break;
5259 gen_neon_movl_scratch_T0(n
* 2);
5260 gen_neon_movl_scratch_T1(n
* 2 + 1);
5262 for (n
= 0; n
< count
* 2; n
++) {
5263 int reg
= (n
< count
) ? rd
: rm
;
5264 gen_neon_movl_T0_scratch(n
);
5265 NEON_SET_REG(T0
, reg
, n
% count
);
5268 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5272 for (pass
= 0; pass
< 2; pass
++) {
5273 neon_load_reg64(cpu_V0
, rm
+ pass
);
5275 if (op
== 36 && q
== 0) {
5276 gen_neon_narrow(size
, tmp
, cpu_V0
);
5278 gen_neon_narrow_satu(size
, tmp
, cpu_V0
);
5280 gen_neon_narrow_sats(size
, tmp
, cpu_V0
);
5285 neon_store_reg(rd
, 0, tmp2
);
5286 neon_store_reg(rd
, 1, tmp
);
5290 case 38: /* VSHLL */
5293 tmp
= neon_load_reg(rm
, 0);
5294 tmp2
= neon_load_reg(rm
, 1);
5295 for (pass
= 0; pass
< 2; pass
++) {
5298 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5299 neon_store_reg64(cpu_V0
, rd
+ pass
);
5304 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5305 if (op
== 30 || op
== 31 || op
>= 58) {
5306 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5307 neon_reg_offset(rm
, pass
));
5309 NEON_GET_REG(T0
, rm
, pass
);
5312 case 1: /* VREV32 */
5314 case 0: tcg_gen_bswap_i32(cpu_T
[0], cpu_T
[0]); break;
5315 case 1: gen_swap_half(cpu_T
[0]); break;
5319 case 2: /* VREV16 */
5322 gen_rev16(cpu_T
[0]);
5326 case 0: gen_helper_neon_cls_s8(cpu_T
[0], cpu_T
[0]); break;
5327 case 1: gen_helper_neon_cls_s16(cpu_T
[0], cpu_T
[0]); break;
5328 case 2: gen_helper_neon_cls_s32(cpu_T
[0], cpu_T
[0]); break;
5334 case 0: gen_helper_neon_clz_u8(cpu_T
[0], cpu_T
[0]); break;
5335 case 1: gen_helper_neon_clz_u16(cpu_T
[0], cpu_T
[0]); break;
5336 case 2: gen_helper_clz(cpu_T
[0], cpu_T
[0]); break;
5343 gen_helper_neon_cnt_u8(cpu_T
[0], cpu_T
[0]);
5350 case 14: /* VQABS */
5352 case 0: gen_helper_neon_qabs_s8(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5353 case 1: gen_helper_neon_qabs_s16(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5354 case 2: gen_helper_neon_qabs_s32(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5358 case 15: /* VQNEG */
5360 case 0: gen_helper_neon_qneg_s8(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5361 case 1: gen_helper_neon_qneg_s16(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5362 case 2: gen_helper_neon_qneg_s32(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5366 case 16: case 19: /* VCGT #0, VCLE #0 */
5367 gen_op_movl_T1_im(0);
5369 case 0: gen_helper_neon_cgt_s8(CPU_T001
); break;
5370 case 1: gen_helper_neon_cgt_s16(CPU_T001
); break;
5371 case 2: gen_helper_neon_cgt_s32(CPU_T001
); break;
5377 case 17: case 20: /* VCGE #0, VCLT #0 */
5378 gen_op_movl_T1_im(0);
5380 case 0: gen_helper_neon_cge_s8(CPU_T001
); break;
5381 case 1: gen_helper_neon_cge_s16(CPU_T001
); break;
5382 case 2: gen_helper_neon_cge_s32(CPU_T001
); break;
5388 case 18: /* VCEQ #0 */
5389 gen_op_movl_T1_im(0);
5391 case 0: gen_helper_neon_ceq_u8(CPU_T001
); break;
5392 case 1: gen_helper_neon_ceq_u16(CPU_T001
); break;
5393 case 2: gen_helper_neon_ceq_u32(CPU_T001
); break;
5399 case 0: gen_helper_neon_abs_s8(cpu_T
[0], cpu_T
[0]); break;
5400 case 1: gen_helper_neon_abs_s16(cpu_T
[0], cpu_T
[0]); break;
5401 case 2: tcg_gen_abs_i32(cpu_T
[0], cpu_T
[0]); break;
5406 gen_op_movl_T1_im(0);
5411 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5412 gen_op_movl_T1_im(0);
5413 gen_helper_neon_cgt_f32(CPU_T001
);
5417 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5418 gen_op_movl_T1_im(0);
5419 gen_helper_neon_cge_f32(CPU_T001
);
5423 case 26: /* Float VCEQ #0 */
5424 gen_op_movl_T1_im(0);
5425 gen_helper_neon_ceq_f32(CPU_T001
);
5427 case 30: /* Float VABS */
5430 case 31: /* Float VNEG */
5434 NEON_GET_REG(T1
, rd
, pass
);
5435 NEON_SET_REG(T1
, rm
, pass
);
5438 NEON_GET_REG(T1
, rd
, pass
);
5440 case 0: gen_helper_neon_trn_u8(); break;
5441 case 1: gen_helper_neon_trn_u16(); break;
5445 NEON_SET_REG(T1
, rm
, pass
);
5447 case 56: /* Integer VRECPE */
5448 gen_helper_recpe_u32(cpu_T
[0], cpu_T
[0], cpu_env
);
5450 case 57: /* Integer VRSQRTE */
5451 gen_helper_rsqrte_u32(cpu_T
[0], cpu_T
[0], cpu_env
);
5453 case 58: /* Float VRECPE */
5454 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5456 case 59: /* Float VRSQRTE */
5457 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5459 case 60: /* VCVT.F32.S32 */
5462 case 61: /* VCVT.F32.U32 */
5465 case 62: /* VCVT.S32.F32 */
5468 case 63: /* VCVT.U32.F32 */
5472 /* Reserved: 21, 29, 39-56 */
5475 if (op
== 30 || op
== 31 || op
>= 58) {
5476 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
5477 neon_reg_offset(rd
, pass
));
5479 NEON_SET_REG(T0
, rd
, pass
);
5484 } else if ((insn
& (1 << 10)) == 0) {
5486 n
= ((insn
>> 5) & 0x18) + 8;
5487 if (insn
& (1 << 6)) {
5488 tmp
= neon_load_reg(rd
, 0);
5491 tcg_gen_movi_i32(tmp
, 0);
5493 tmp2
= neon_load_reg(rm
, 0);
5494 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tcg_const_i32(rn
),
5497 if (insn
& (1 << 6)) {
5498 tmp
= neon_load_reg(rd
, 1);
5501 tcg_gen_movi_i32(tmp
, 0);
5503 tmp3
= neon_load_reg(rm
, 1);
5504 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tcg_const_i32(rn
),
5506 neon_store_reg(rd
, 0, tmp2
);
5507 neon_store_reg(rd
, 1, tmp3
);
5509 } else if ((insn
& 0x380) == 0) {
5511 if (insn
& (1 << 19)) {
5512 NEON_SET_REG(T0
, rm
, 1);
5514 NEON_SET_REG(T0
, rm
, 0);
5516 if (insn
& (1 << 16)) {
5517 gen_neon_dup_u8(cpu_T
[0], ((insn
>> 17) & 3) * 8);
5518 } else if (insn
& (1 << 17)) {
5519 if ((insn
>> 18) & 1)
5520 gen_neon_dup_high16(cpu_T
[0]);
5522 gen_neon_dup_low16(cpu_T
[0]);
5524 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5525 NEON_SET_REG(T0
, rd
, pass
);
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
    }
}
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = new_tmp();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
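/* Illustrative example (not part of the original source): a long multiply
   such as UMULL produces a 64-bit result, e.g. 0x00000123456789ab, which is
   written back as
       rlow  = 0x456789ab   (low word, via the first truncate)
       rhigh = 0x00000123   (high word, after the shift by 32).  */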
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    dead_tmp(tmp2);
    tcg_gen_add_i64(val, val, tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    dead_tmp(tmpl);
    dead_tmp(tmph);
    tcg_gen_add_i64(val, val, tmp);
}
/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = new_tmp();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    dead_tmp(tmp);
}
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;

    insn = ldl_code(s->pc);
    s->pc += 4;
    /* M variants do not implement ARM mode.  */
    if (IS_M(env))
        goto illegal_op;
    cond = insn >> 28;
    if (cond == 0xf) {
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            return;
        }
5654 if ((insn
& 0x0d70f000) == 0x0550f000)
5656 else if ((insn
& 0x0ffffdff) == 0x01010000) {
5659 if (insn
& (1 << 9)) {
5660 /* BE8 mode not implemented. */
5664 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
5665 switch ((insn
>> 4) & 0xf) {
5668 gen_helper_clrex(cpu_env
);
5674 /* We don't emulate caches so these are a no-op. */
5679 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
5685 op1
= (insn
& 0x1f);
5686 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
5687 addr
= load_reg(s
, 13);
5690 gen_helper_get_r13_banked(addr
, cpu_env
, tcg_const_i32(op1
));
5692 i
= (insn
>> 23) & 3;
5694 case 0: offset
= -4; break; /* DA */
5695 case 1: offset
= -8; break; /* DB */
5696 case 2: offset
= 0; break; /* IA */
5697 case 3: offset
= 4; break; /* IB */
5701 tcg_gen_addi_i32(addr
, addr
, offset
);
5702 tmp
= load_reg(s
, 14);
5703 gen_st32(tmp
, addr
, 0);
5705 gen_helper_cpsr_read(tmp
);
5706 tcg_gen_addi_i32(addr
, addr
, 4);
5707 gen_st32(tmp
, addr
, 0);
5708 if (insn
& (1 << 21)) {
5709 /* Base writeback. */
5711 case 0: offset
= -8; break;
5712 case 1: offset
= -4; break;
5713 case 2: offset
= 4; break;
5714 case 3: offset
= 0; break;
5718 tcg_gen_addi_i32(addr
, tmp
, offset
);
5719 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
5720 gen_movl_reg_T1(s
, 13);
5722 gen_helper_set_r13_banked(cpu_env
, tcg_const_i32(op1
), cpu_T
[1]);
5727 } else if ((insn
& 0x0e5fffe0) == 0x081d0a00) {
5733 rn
= (insn
>> 16) & 0xf;
5734 addr
= load_reg(s
, rn
);
5735 i
= (insn
>> 23) & 3;
5737 case 0: offset
= -4; break; /* DA */
5738 case 1: offset
= -8; break; /* DB */
5739 case 2: offset
= 0; break; /* IA */
5740 case 3: offset
= 4; break; /* IB */
5744 tcg_gen_addi_i32(addr
, addr
, offset
);
5745 /* Load PC into tmp and CPSR into tmp2. */
5746 tmp
= gen_ld32(addr
, 0);
5747 tcg_gen_addi_i32(addr
, addr
, 4);
5748 tmp2
= gen_ld32(addr
, 0);
5749 if (insn
& (1 << 21)) {
5750 /* Base writeback. */
5752 case 0: offset
= -8; break;
5753 case 1: offset
= -4; break;
5754 case 2: offset
= 4; break;
5755 case 3: offset
= 0; break;
5759 tcg_gen_addi_i32(addr
, addr
, offset
);
5760 store_reg(s
, rn
, addr
);
5764 gen_rfe(s
, tmp
, tmp2
);
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            int32_t offset;

            val = (uint32_t)s->pc;
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
            val += 4;
            gen_bx_im(s, val);
            return;
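            /* Added note: the expression above assembles the branch target
               from three pieces.  Bits [23:0] of the instruction give the
               signed word offset (hence "offset << 2"), bit 24 (the H bit)
               is moved into bit 1 by "(insn >> 23) & 2" to select the extra
               halfword, and the final "| 1" sets the Thumb bit so execution
               continues in Thumb state.  */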
5781 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
5782 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
5783 /* iWMMXt register transfer. */
5784 if (env
->cp15
.c15_cpar
& (1 << 1))
5785 if (!disas_iwmmxt_insn(env
, s
, insn
))
5788 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
5789 /* Coprocessor double register transfer. */
5790 } else if ((insn
& 0x0f000010) == 0x0e000010) {
5791 /* Additional coprocessor register transfer. */
5792 } else if ((insn
& 0x0ff10020) == 0x01000000) {
5795 /* cps (privileged) */
5799 if (insn
& (1 << 19)) {
5800 if (insn
& (1 << 8))
5802 if (insn
& (1 << 7))
5804 if (insn
& (1 << 6))
5806 if (insn
& (1 << 18))
5809 if (insn
& (1 << 17)) {
5811 val
|= (insn
& 0x1f);
5814 gen_op_movl_T0_im(val
);
5815 gen_set_psr_T0(s
, mask
, 0);
        /* if not always execute, we generate a conditional jump to
           the next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
5828 if ((insn
& 0x0f900000) == 0x03000000) {
5829 if ((insn
& (1 << 21)) == 0) {
5831 rd
= (insn
>> 12) & 0xf;
5832 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
5833 if ((insn
& (1 << 22)) == 0) {
5836 tcg_gen_movi_i32(tmp
, val
);
5839 tmp
= load_reg(s
, rd
);
5840 tcg_gen_ext16u_i32(tmp
, tmp
);
5841 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
5843 store_reg(s
, rd
, tmp
);
5845 if (((insn
>> 12) & 0xf) != 0xf)
5847 if (((insn
>> 16) & 0xf) == 0) {
5848 gen_nop_hint(s
, insn
& 0xff);
5850 /* CPSR = immediate */
5852 shift
= ((insn
>> 8) & 0xf) * 2;
5854 val
= (val
>> shift
) | (val
<< (32 - shift
));
5855 gen_op_movl_T0_im(val
);
5856 i
= ((insn
& (1 << 22)) != 0);
5857 if (gen_set_psr_T0(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
))
5861 } else if ((insn
& 0x0f900000) == 0x01000000
5862 && (insn
& 0x00000090) != 0x00000090) {
5863 /* miscellaneous instructions */
5864 op1
= (insn
>> 21) & 3;
5865 sh
= (insn
>> 4) & 0xf;
5868 case 0x0: /* move program status register */
5871 gen_movl_T0_reg(s
, rm
);
5872 i
= ((op1
& 2) != 0);
5873 if (gen_set_psr_T0(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
))
5877 rd
= (insn
>> 12) & 0xf;
5881 tmp
= load_cpu_field(spsr
);
5884 gen_helper_cpsr_read(tmp
);
5886 store_reg(s
, rd
, tmp
);
5891 /* branch/exchange thumb (bx). */
5892 tmp
= load_reg(s
, rm
);
5894 } else if (op1
== 3) {
5896 rd
= (insn
>> 12) & 0xf;
5897 tmp
= load_reg(s
, rm
);
5898 gen_helper_clz(tmp
, tmp
);
5899 store_reg(s
, rd
, tmp
);
5907 /* Trivial implementation equivalent to bx. */
5908 tmp
= load_reg(s
, rm
);
5918 /* branch link/exchange thumb (blx) */
5919 tmp
= load_reg(s
, rm
);
5921 tcg_gen_movi_i32(tmp2
, s
->pc
);
5922 store_reg(s
, 14, tmp2
);
5925 case 0x5: /* saturating add/subtract */
5926 rd
= (insn
>> 12) & 0xf;
5927 rn
= (insn
>> 16) & 0xf;
5928 tmp
= load_reg(s
, rm
);
5929 tmp2
= load_reg(s
, rn
);
5931 gen_helper_double_saturate(tmp2
, tmp2
);
5933 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
5935 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
5937 store_reg(s
, rd
, tmp
);
5940 gen_set_condexec(s
);
5941 gen_set_pc_im(s
->pc
- 4);
5942 gen_exception(EXCP_BKPT
);
5943 s
->is_jmp
= DISAS_JUMP
;
5945 case 0x8: /* signed multiply */
5949 rs
= (insn
>> 8) & 0xf;
5950 rn
= (insn
>> 12) & 0xf;
5951 rd
= (insn
>> 16) & 0xf;
5953 /* (32 * 16) >> 16 */
5954 tmp
= load_reg(s
, rm
);
5955 tmp2
= load_reg(s
, rs
);
5957 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
5960 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
5961 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
5963 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
5964 if ((sh
& 2) == 0) {
5965 tmp2
= load_reg(s
, rn
);
5966 gen_helper_add_setq(tmp
, tmp
, tmp2
);
5969 store_reg(s
, rd
, tmp
);
5972 tmp
= load_reg(s
, rm
);
5973 tmp2
= load_reg(s
, rs
);
5974 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
5977 tmp64
= tcg_temp_new_i64();
5978 tcg_gen_ext_i32_i64(tmp64
, tmp
);
5980 gen_addq(s
, tmp64
, rn
, rd
);
5981 gen_storeq_reg(s
, rn
, rd
, tmp64
);
5984 tmp2
= load_reg(s
, rn
);
5985 gen_helper_add_setq(tmp
, tmp
, tmp2
);
5988 store_reg(s
, rd
, tmp
);
5995 } else if (((insn
& 0x0e000000) == 0 &&
5996 (insn
& 0x00000090) != 0x90) ||
5997 ((insn
& 0x0e000000) == (1 << 25))) {
5998 int set_cc
, logic_cc
, shiftop
;
6000 op1
= (insn
>> 21) & 0xf;
6001 set_cc
= (insn
>> 20) & 1;
6002 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6004 /* data processing instruction */
6005 if (insn
& (1 << 25)) {
6006 /* immediate operand */
6008 shift
= ((insn
>> 8) & 0xf) * 2;
6010 val
= (val
>> shift
) | (val
<< (32 - shift
));
6011 gen_op_movl_T1_im(val
);
6012 if (logic_cc
&& shift
)
6013 gen_set_CF_bit31(cpu_T
[1]);
6017 gen_movl_T1_reg(s
, rm
);
6018 shiftop
= (insn
>> 5) & 3;
6019 if (!(insn
& (1 << 4))) {
6020 shift
= (insn
>> 7) & 0x1f;
6021 gen_arm_shift_im(cpu_T
[1], shiftop
, shift
, logic_cc
);
6023 rs
= (insn
>> 8) & 0xf;
6024 tmp
= load_reg(s
, rs
);
6025 gen_arm_shift_reg(cpu_T
[1], shiftop
, tmp
, logic_cc
);
6028 if (op1
!= 0x0f && op1
!= 0x0d) {
6029 rn
= (insn
>> 16) & 0xf;
6030 gen_movl_T0_reg(s
, rn
);
6032 rd
= (insn
>> 12) & 0xf;
6035 gen_op_andl_T0_T1();
6036 gen_movl_reg_T0(s
, rd
);
6038 gen_op_logic_T0_cc();
6041 gen_op_xorl_T0_T1();
6042 gen_movl_reg_T0(s
, rd
);
6044 gen_op_logic_T0_cc();
6047 if (set_cc
&& rd
== 15) {
6048 /* SUBS r15, ... is used for exception return. */
6051 gen_op_subl_T0_T1_cc();
6052 gen_exception_return(s
);
6055 gen_op_subl_T0_T1_cc();
6057 gen_op_subl_T0_T1();
6058 gen_movl_reg_T0(s
, rd
);
6063 gen_op_rsbl_T0_T1_cc();
6065 gen_op_rsbl_T0_T1();
6066 gen_movl_reg_T0(s
, rd
);
6070 gen_op_addl_T0_T1_cc();
6072 gen_op_addl_T0_T1();
6073 gen_movl_reg_T0(s
, rd
);
6077 gen_op_adcl_T0_T1_cc();
6080 gen_movl_reg_T0(s
, rd
);
6084 gen_op_sbcl_T0_T1_cc();
6087 gen_movl_reg_T0(s
, rd
);
6091 gen_op_rscl_T0_T1_cc();
6094 gen_movl_reg_T0(s
, rd
);
6098 gen_op_andl_T0_T1();
6099 gen_op_logic_T0_cc();
6104 gen_op_xorl_T0_T1();
6105 gen_op_logic_T0_cc();
6110 gen_op_subl_T0_T1_cc();
6115 gen_op_addl_T0_T1_cc();
6120 gen_movl_reg_T0(s
, rd
);
6122 gen_op_logic_T0_cc();
6125 if (logic_cc
&& rd
== 15) {
6126 /* MOVS r15, ... is used for exception return. */
6129 gen_op_movl_T0_T1();
6130 gen_exception_return(s
);
6132 gen_movl_reg_T1(s
, rd
);
6134 gen_op_logic_T1_cc();
6138 gen_op_bicl_T0_T1();
6139 gen_movl_reg_T0(s
, rd
);
6141 gen_op_logic_T0_cc();
6146 gen_movl_reg_T1(s
, rd
);
6148 gen_op_logic_T1_cc();
6152 /* other instructions */
6153 op1
= (insn
>> 24) & 0xf;
6157 /* multiplies, extra load/stores */
6158 sh
= (insn
>> 5) & 3;
6161 rd
= (insn
>> 16) & 0xf;
6162 rn
= (insn
>> 12) & 0xf;
6163 rs
= (insn
>> 8) & 0xf;
6165 op1
= (insn
>> 20) & 0xf;
6167 case 0: case 1: case 2: case 3: case 6:
6169 tmp
= load_reg(s
, rs
);
6170 tmp2
= load_reg(s
, rm
);
6171 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6173 if (insn
& (1 << 22)) {
6174 /* Subtract (mls) */
6176 tmp2
= load_reg(s
, rn
);
6177 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6179 } else if (insn
& (1 << 21)) {
6181 tmp2
= load_reg(s
, rn
);
6182 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6185 if (insn
& (1 << 20))
6187 store_reg(s
, rd
, tmp
);
6191 tmp
= load_reg(s
, rs
);
6192 tmp2
= load_reg(s
, rm
);
6193 if (insn
& (1 << 22))
6194 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6196 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6197 if (insn
& (1 << 21)) /* mult accumulate */
6198 gen_addq(s
, tmp64
, rn
, rd
);
6199 if (!(insn
& (1 << 23))) { /* double accumulate */
6201 gen_addq_lo(s
, tmp64
, rn
);
6202 gen_addq_lo(s
, tmp64
, rd
);
6204 if (insn
& (1 << 20))
6205 gen_logicq_cc(tmp64
);
6206 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6210 rn
= (insn
>> 16) & 0xf;
6211 rd
= (insn
>> 12) & 0xf;
6212 if (insn
& (1 << 23)) {
6213 /* load/store exclusive */
6214 op1
= (insn
>> 21) & 0x3;
6219 gen_movl_T1_reg(s
, rn
);
6221 if (insn
& (1 << 20)) {
6222 gen_helper_mark_exclusive(cpu_env
, cpu_T
[1]);
6225 tmp
= gen_ld32(addr
, IS_USER(s
));
6227 case 1: /* ldrexd */
6228 tmp
= gen_ld32(addr
, IS_USER(s
));
6229 store_reg(s
, rd
, tmp
);
6230 tcg_gen_addi_i32(addr
, addr
, 4);
6231 tmp
= gen_ld32(addr
, IS_USER(s
));
6234 case 2: /* ldrexb */
6235 tmp
= gen_ld8u(addr
, IS_USER(s
));
6237 case 3: /* ldrexh */
6238 tmp
= gen_ld16u(addr
, IS_USER(s
));
6243 store_reg(s
, rd
, tmp
);
6245 int label
= gen_new_label();
6247 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
6248 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0],
6250 tmp
= load_reg(s
,rm
);
6253 gen_st32(tmp
, addr
, IS_USER(s
));
6255 case 1: /* strexd */
6256 gen_st32(tmp
, addr
, IS_USER(s
));
6257 tcg_gen_addi_i32(addr
, addr
, 4);
6258 tmp
= load_reg(s
, rm
+ 1);
6259 gen_st32(tmp
, addr
, IS_USER(s
));
6261 case 2: /* strexb */
6262 gen_st8(tmp
, addr
, IS_USER(s
));
6264 case 3: /* strexh */
6265 gen_st16(tmp
, addr
, IS_USER(s
));
6270 gen_set_label(label
);
6271 gen_movl_reg_T0(s
, rd
);
6274 /* SWP instruction */
                    /* ??? This is not really atomic.  However we know
                       we never have multiple CPUs running in parallel,
                       so it is good enough.  */
6280 addr
= load_reg(s
, rn
);
6281 tmp
= load_reg(s
, rm
);
6282 if (insn
& (1 << 22)) {
6283 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6284 gen_st8(tmp
, addr
, IS_USER(s
));
6286 tmp2
= gen_ld32(addr
, IS_USER(s
));
6287 gen_st32(tmp
, addr
, IS_USER(s
));
6290 store_reg(s
, rd
, tmp2
);
6296 /* Misc load/store */
6297 rn
= (insn
>> 16) & 0xf;
6298 rd
= (insn
>> 12) & 0xf;
6299 addr
= load_reg(s
, rn
);
6300 if (insn
& (1 << 24))
6301 gen_add_datah_offset(s
, insn
, 0, addr
);
6303 if (insn
& (1 << 20)) {
6307 tmp
= gen_ld16u(addr
, IS_USER(s
));
6310 tmp
= gen_ld8s(addr
, IS_USER(s
));
6314 tmp
= gen_ld16s(addr
, IS_USER(s
));
6318 } else if (sh
& 2) {
6322 tmp
= load_reg(s
, rd
);
6323 gen_st32(tmp
, addr
, IS_USER(s
));
6324 tcg_gen_addi_i32(addr
, addr
, 4);
6325 tmp
= load_reg(s
, rd
+ 1);
6326 gen_st32(tmp
, addr
, IS_USER(s
));
6330 tmp
= gen_ld32(addr
, IS_USER(s
));
6331 store_reg(s
, rd
, tmp
);
6332 tcg_gen_addi_i32(addr
, addr
, 4);
6333 tmp
= gen_ld32(addr
, IS_USER(s
));
6337 address_offset
= -4;
6340 tmp
= load_reg(s
, rd
);
6341 gen_st16(tmp
, addr
, IS_USER(s
));
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
6348 if (!(insn
& (1 << 24))) {
6349 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6350 store_reg(s
, rn
, addr
);
6351 } else if (insn
& (1 << 21)) {
6353 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6354 store_reg(s
, rn
, addr
);
6359 /* Complete the load. */
6360 store_reg(s
, rd
, tmp
);
6369 if (insn
& (1 << 4)) {
6371 /* Armv6 Media instructions. */
6373 rn
= (insn
>> 16) & 0xf;
6374 rd
= (insn
>> 12) & 0xf;
6375 rs
= (insn
>> 8) & 0xf;
6376 switch ((insn
>> 23) & 3) {
6377 case 0: /* Parallel add/subtract. */
6378 op1
= (insn
>> 20) & 7;
6379 tmp
= load_reg(s
, rn
);
6380 tmp2
= load_reg(s
, rm
);
6381 sh
= (insn
>> 5) & 7;
6382 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6384 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6386 store_reg(s
, rd
, tmp
);
6389 if ((insn
& 0x00700020) == 0) {
6390 /* Halfword pack. */
6391 tmp
= load_reg(s
, rn
);
6392 tmp2
= load_reg(s
, rm
);
6393 shift
= (insn
>> 7) & 0x1f;
6394 if (insn
& (1 << 6)) {
6398 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6399 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6400 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6404 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6405 tcg_gen_ext16u_i32(tmp
, tmp
);
6406 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6408 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6410 store_reg(s
, rd
, tmp
);
6411 } else if ((insn
& 0x00200020) == 0x00200000) {
6413 tmp
= load_reg(s
, rm
);
6414 shift
= (insn
>> 7) & 0x1f;
6415 if (insn
& (1 << 6)) {
6418 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6420 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6422 sh
= (insn
>> 16) & 0x1f;
6424 if (insn
& (1 << 22))
6425 gen_helper_usat(tmp
, tmp
, tcg_const_i32(sh
));
6427 gen_helper_ssat(tmp
, tmp
, tcg_const_i32(sh
));
6429 store_reg(s
, rd
, tmp
);
6430 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6432 tmp
= load_reg(s
, rm
);
6433 sh
= (insn
>> 16) & 0x1f;
6435 if (insn
& (1 << 22))
6436 gen_helper_usat16(tmp
, tmp
, tcg_const_i32(sh
));
6438 gen_helper_ssat16(tmp
, tmp
, tcg_const_i32(sh
));
6440 store_reg(s
, rd
, tmp
);
6441 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6443 tmp
= load_reg(s
, rn
);
6444 tmp2
= load_reg(s
, rm
);
6446 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6447 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6450 store_reg(s
, rd
, tmp
);
6451 } else if ((insn
& 0x000003e0) == 0x00000060) {
6452 tmp
= load_reg(s
, rm
);
6453 shift
= (insn
>> 10) & 3;
                    /* ??? In many cases it's not necessary to do a
                       rotate, a shift is sufficient.  */
6457 tcg_gen_rori_i32(tmp
, tmp
, shift
* 8);
6458 op1
= (insn
>> 20) & 7;
6460 case 0: gen_sxtb16(tmp
); break;
6461 case 2: gen_sxtb(tmp
); break;
6462 case 3: gen_sxth(tmp
); break;
6463 case 4: gen_uxtb16(tmp
); break;
6464 case 6: gen_uxtb(tmp
); break;
6465 case 7: gen_uxth(tmp
); break;
6466 default: goto illegal_op
;
6469 tmp2
= load_reg(s
, rn
);
6470 if ((op1
& 3) == 0) {
6471 gen_add16(tmp
, tmp2
);
6473 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6477 store_reg(s
, rd
, tmp
);
6478 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
6480 tmp
= load_reg(s
, rm
);
6481 if (insn
& (1 << 22)) {
6482 if (insn
& (1 << 7)) {
6486 gen_helper_rbit(tmp
, tmp
);
6489 if (insn
& (1 << 7))
6492 tcg_gen_bswap_i32(tmp
, tmp
);
6494 store_reg(s
, rd
, tmp
);
6499 case 2: /* Multiplies (Type 3). */
6500 tmp
= load_reg(s
, rm
);
6501 tmp2
= load_reg(s
, rs
);
6502 if (insn
& (1 << 20)) {
6503 /* Signed multiply most significant [accumulate]. */
6504 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6505 if (insn
& (1 << 5))
6506 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
6507 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6509 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6511 tmp2
= load_reg(s
, rd
);
6512 if (insn
& (1 << 6)) {
6513 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6515 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6519 store_reg(s
, rn
, tmp
);
6521 if (insn
& (1 << 5))
6522 gen_swap_half(tmp2
);
6523 gen_smul_dual(tmp
, tmp2
);
6524 /* This addition cannot overflow. */
6525 if (insn
& (1 << 6)) {
6526 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6528 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6531 if (insn
& (1 << 22)) {
6532 /* smlald, smlsld */
6533 tmp64
= tcg_temp_new_i64();
6534 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6536 gen_addq(s
, tmp64
, rd
, rn
);
6537 gen_storeq_reg(s
, rd
, rn
, tmp64
);
6539 /* smuad, smusd, smlad, smlsd */
6542 tmp2
= load_reg(s
, rd
);
6543 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6546 store_reg(s
, rn
, tmp
);
6551 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
6553 case 0: /* Unsigned sum of absolute differences. */
6555 tmp
= load_reg(s
, rm
);
6556 tmp2
= load_reg(s
, rs
);
6557 gen_helper_usad8(tmp
, tmp
, tmp2
);
6560 tmp2
= load_reg(s
, rd
);
6561 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6564 store_reg(s
, rn
, tmp
);
6566 case 0x20: case 0x24: case 0x28: case 0x2c:
6567 /* Bitfield insert/clear. */
6569 shift
= (insn
>> 7) & 0x1f;
6570 i
= (insn
>> 16) & 0x1f;
6574 tcg_gen_movi_i32(tmp
, 0);
6576 tmp
= load_reg(s
, rm
);
6579 tmp2
= load_reg(s
, rd
);
6580 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
6583 store_reg(s
, rd
, tmp
);
6585 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6586 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6588 tmp
= load_reg(s
, rm
);
6589 shift
= (insn
>> 7) & 0x1f;
6590 i
= ((insn
>> 16) & 0x1f) + 1;
6595 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
6597 gen_sbfx(tmp
, shift
, i
);
6600 store_reg(s
, rd
, tmp
);
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
                goto illegal_op;
6619 /* load/store byte/word */
6620 rn
= (insn
>> 16) & 0xf;
6621 rd
= (insn
>> 12) & 0xf;
6622 tmp2
= load_reg(s
, rn
);
6623 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
6624 if (insn
& (1 << 24))
6625 gen_add_data_offset(s
, insn
, tmp2
);
6626 if (insn
& (1 << 20)) {
6628 if (insn
& (1 << 22)) {
6629 tmp
= gen_ld8u(tmp2
, i
);
6631 tmp
= gen_ld32(tmp2
, i
);
6635 tmp
= load_reg(s
, rd
);
6636 if (insn
& (1 << 22))
6637 gen_st8(tmp
, tmp2
, i
);
6639 gen_st32(tmp
, tmp2
, i
);
6641 if (!(insn
& (1 << 24))) {
6642 gen_add_data_offset(s
, insn
, tmp2
);
6643 store_reg(s
, rn
, tmp2
);
6644 } else if (insn
& (1 << 21)) {
6645 store_reg(s
, rn
, tmp2
);
6649 if (insn
& (1 << 20)) {
6650 /* Complete the load. */
6654 store_reg(s
, rd
, tmp
);
6660 int j
, n
, user
, loaded_base
;
6662 /* load/store multiple words */
6663 /* XXX: store correct base if write back */
6665 if (insn
& (1 << 22)) {
6667 goto illegal_op
; /* only usable in supervisor mode */
6669 if ((insn
& (1 << 15)) == 0)
6672 rn
= (insn
>> 16) & 0xf;
6673 addr
= load_reg(s
, rn
);
6675 /* compute total size */
6677 TCGV_UNUSED(loaded_var
);
6680 if (insn
& (1 << i
))
6683 /* XXX: test invalid n == 0 case ? */
6684 if (insn
& (1 << 23)) {
6685 if (insn
& (1 << 24)) {
6687 tcg_gen_addi_i32(addr
, addr
, 4);
6689 /* post increment */
6692 if (insn
& (1 << 24)) {
6694 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
6696 /* post decrement */
6698 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
6703 if (insn
& (1 << i
)) {
6704 if (insn
& (1 << 20)) {
6706 tmp
= gen_ld32(addr
, IS_USER(s
));
6710 gen_helper_set_user_reg(tcg_const_i32(i
), tmp
);
6712 } else if (i
== rn
) {
6716 store_reg(s
, i
, tmp
);
6721 /* special case: r15 = PC + 8 */
6722 val
= (long)s
->pc
+ 4;
6724 tcg_gen_movi_i32(tmp
, val
);
6727 gen_helper_get_user_reg(tmp
, tcg_const_i32(i
));
6729 tmp
= load_reg(s
, i
);
6731 gen_st32(tmp
, addr
, IS_USER(s
));
6734 /* no need to add after the last transfer */
6736 tcg_gen_addi_i32(addr
, addr
, 4);
6739 if (insn
& (1 << 21)) {
6741 if (insn
& (1 << 23)) {
6742 if (insn
& (1 << 24)) {
6745 /* post increment */
6746 tcg_gen_addi_i32(addr
, addr
, 4);
6749 if (insn
& (1 << 24)) {
6752 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
6754 /* post decrement */
6755 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
6758 store_reg(s
, rn
, addr
);
6763 store_reg(s
, rn
, loaded_var
);
6765 if ((insn
& (1 << 22)) && !user
) {
6766 /* Restore CPSR from SPSR. */
6767 tmp
= load_cpu_field(spsr
);
6768 gen_set_cpsr(tmp
, 0xffffffff);
6770 s
->is_jmp
= DISAS_UPDATE
;
6779 /* branch (and link) */
6780 val
= (int32_t)s
->pc
;
6781 if (insn
& (1 << 24)) {
6783 tcg_gen_movi_i32(tmp
, val
);
6784 store_reg(s
, 14, tmp
);
6786 offset
= (((int32_t)insn
<< 8) >> 8);
6787 val
+= (offset
<< 2) + 4;
6795 if (disas_coproc_insn(env
, s
, insn
))
6800 gen_set_pc_im(s
->pc
);
6801 s
->is_jmp
= DISAS_SWI
;
6805 gen_set_condexec(s
);
6806 gen_set_pc_im(s
->pc
- 4);
6807 gen_exception(EXCP_UDEF
);
6808 s
->is_jmp
= DISAS_JUMP
;
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
6835 gen_op_andl_T0_T1();
6839 gen_op_bicl_T0_T1();
6852 gen_op_xorl_T0_T1();
6857 gen_op_addl_T0_T1_cc();
6859 gen_op_addl_T0_T1();
6863 gen_op_adcl_T0_T1_cc();
6869 gen_op_sbcl_T0_T1_cc();
6875 gen_op_subl_T0_T1_cc();
6877 gen_op_subl_T0_T1();
6881 gen_op_rsbl_T0_T1_cc();
6883 gen_op_rsbl_T0_T1();
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_op_logic_T0_cc();
        if (shifter_out)
            gen_set_CF_bit31(cpu_T[1]);
    }
    return 0;
}
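
/* Illustrative sketch only, added for clarity: a typical caller places the
   operands in T0/T1, applies the shifter, then lets gen_thumb2_data_op emit
   the ALU operation.  The variable names mirror the decoder below, but the
   snippet itself is just an example, not original code.  */
#if 0
    gen_movl_T0_reg(s, rn);
    gen_movl_T1_reg(s, rm);
    gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
    if (gen_thumb2_data_op(s, op, conds, 0))
        goto illegal_op;
    if (rd != 15)
        gen_movl_reg_T0(s, rd);
#endif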
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
6917 if ((insn
& (1 << 12)) == 0) {
6918 /* Second half of blx. */
6919 offset
= ((insn
& 0x7ff) << 1);
6920 tmp
= load_reg(s
, 14);
6921 tcg_gen_addi_i32(tmp
, tmp
, offset
);
6922 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
6925 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
6926 store_reg(s
, 14, tmp2
);
6930 if (insn
& (1 << 11)) {
6931 /* Second half of bl. */
6932 offset
= ((insn
& 0x7ff) << 1) | 1;
6933 tmp
= load_reg(s
, 14);
6934 tcg_gen_addi_i32(tmp
, tmp
, offset
);
6937 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
6938 store_reg(s
, 14, tmp2
);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
            offset = ((int32_t)insn << 21) >> 9;
            gen_op_movl_T0_im(s->pc + 2 + offset);
            gen_movl_reg_T0(s, 14);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    insn = lduw_code(s->pc);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
6971 if (insn
& (1 << 22)) {
6972 /* Other load/store, table branch. */
6973 if (insn
& 0x01200000) {
6974 /* Load/store doubleword. */
6977 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
6979 addr
= load_reg(s
, rn
);
6981 offset
= (insn
& 0xff) * 4;
6982 if ((insn
& (1 << 23)) == 0)
6984 if (insn
& (1 << 24)) {
6985 tcg_gen_addi_i32(addr
, addr
, offset
);
6988 if (insn
& (1 << 20)) {
6990 tmp
= gen_ld32(addr
, IS_USER(s
));
6991 store_reg(s
, rs
, tmp
);
6992 tcg_gen_addi_i32(addr
, addr
, 4);
6993 tmp
= gen_ld32(addr
, IS_USER(s
));
6994 store_reg(s
, rd
, tmp
);
6997 tmp
= load_reg(s
, rs
);
6998 gen_st32(tmp
, addr
, IS_USER(s
));
6999 tcg_gen_addi_i32(addr
, addr
, 4);
7000 tmp
= load_reg(s
, rd
);
7001 gen_st32(tmp
, addr
, IS_USER(s
));
7003 if (insn
& (1 << 21)) {
7004 /* Base writeback. */
7007 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7008 store_reg(s
, rn
, addr
);
7012 } else if ((insn
& (1 << 23)) == 0) {
7013 /* Load/store exclusive word. */
7014 gen_movl_T1_reg(s
, rn
);
7016 if (insn
& (1 << 20)) {
7017 gen_helper_mark_exclusive(cpu_env
, cpu_T
[1]);
7018 tmp
= gen_ld32(addr
, IS_USER(s
));
7019 store_reg(s
, rd
, tmp
);
7021 int label
= gen_new_label();
7022 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
7023 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0],
7025 tmp
= load_reg(s
, rs
);
7026 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
7027 gen_set_label(label
);
7028 gen_movl_reg_T0(s
, rd
);
7030 } else if ((insn
& (1 << 6)) == 0) {
7034 tcg_gen_movi_i32(addr
, s
->pc
);
7036 addr
= load_reg(s
, rn
);
7038 tmp
= load_reg(s
, rm
);
7039 tcg_gen_add_i32(addr
, addr
, tmp
);
7040 if (insn
& (1 << 4)) {
7042 tcg_gen_add_i32(addr
, addr
, tmp
);
7044 tmp
= gen_ld16u(addr
, IS_USER(s
));
7047 tmp
= gen_ld8u(addr
, IS_USER(s
));
7050 tcg_gen_shli_i32(tmp
, tmp
, 1);
7051 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7052 store_reg(s
, 15, tmp
);
7054 /* Load/store exclusive byte/halfword/doubleword. */
7055 /* ??? These are not really atomic. However we know
7056 we never have multiple CPUs running in parallel,
7057 so it is good enough. */
7058 op
= (insn
>> 4) & 0x3;
7059 /* Must use a global reg for the address because we have
7060 a conditional branch in the store instruction. */
7061 gen_movl_T1_reg(s
, rn
);
7063 if (insn
& (1 << 20)) {
7064 gen_helper_mark_exclusive(cpu_env
, addr
);
7067 tmp
= gen_ld8u(addr
, IS_USER(s
));
7070 tmp
= gen_ld16u(addr
, IS_USER(s
));
7073 tmp
= gen_ld32(addr
, IS_USER(s
));
7074 tcg_gen_addi_i32(addr
, addr
, 4);
7075 tmp2
= gen_ld32(addr
, IS_USER(s
));
7076 store_reg(s
, rd
, tmp2
);
7081 store_reg(s
, rs
, tmp
);
7083 int label
= gen_new_label();
7084 /* Must use a global that is not killed by the branch. */
7085 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
7086 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0], 0, label
);
7087 tmp
= load_reg(s
, rs
);
7090 gen_st8(tmp
, addr
, IS_USER(s
));
7093 gen_st16(tmp
, addr
, IS_USER(s
));
7096 gen_st32(tmp
, addr
, IS_USER(s
));
7097 tcg_gen_addi_i32(addr
, addr
, 4);
7098 tmp
= load_reg(s
, rd
);
7099 gen_st32(tmp
, addr
, IS_USER(s
));
7104 gen_set_label(label
);
7105 gen_movl_reg_T0(s
, rm
);
7109 /* Load/store multiple, RFE, SRS. */
7110 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7111 /* Not available in user mode. */
7114 if (insn
& (1 << 20)) {
7116 addr
= load_reg(s
, rn
);
7117 if ((insn
& (1 << 24)) == 0)
7118 tcg_gen_addi_i32(addr
, addr
, -8);
7119 /* Load PC into tmp and CPSR into tmp2. */
7120 tmp
= gen_ld32(addr
, 0);
7121 tcg_gen_addi_i32(addr
, addr
, 4);
7122 tmp2
= gen_ld32(addr
, 0);
7123 if (insn
& (1 << 21)) {
7124 /* Base writeback. */
7125 if (insn
& (1 << 24)) {
7126 tcg_gen_addi_i32(addr
, addr
, 4);
7128 tcg_gen_addi_i32(addr
, addr
, -4);
7130 store_reg(s
, rn
, addr
);
7134 gen_rfe(s
, tmp
, tmp2
);
7138 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7139 addr
= load_reg(s
, 13);
7142 gen_helper_get_r13_banked(addr
, cpu_env
, tcg_const_i32(op
));
7144 if ((insn
& (1 << 24)) == 0) {
7145 tcg_gen_addi_i32(addr
, addr
, -8);
7147 tmp
= load_reg(s
, 14);
7148 gen_st32(tmp
, addr
, 0);
7149 tcg_gen_addi_i32(addr
, addr
, 4);
7151 gen_helper_cpsr_read(tmp
);
7152 gen_st32(tmp
, addr
, 0);
7153 if (insn
& (1 << 21)) {
7154 if ((insn
& (1 << 24)) == 0) {
7155 tcg_gen_addi_i32(addr
, addr
, -4);
7157 tcg_gen_addi_i32(addr
, addr
, 4);
7159 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7160 store_reg(s
, 13, addr
);
7162 gen_helper_set_r13_banked(cpu_env
,
7163 tcg_const_i32(op
), addr
);
7171 /* Load/store multiple. */
7172 addr
= load_reg(s
, rn
);
7174 for (i
= 0; i
< 16; i
++) {
7175 if (insn
& (1 << i
))
7178 if (insn
& (1 << 24)) {
7179 tcg_gen_addi_i32(addr
, addr
, -offset
);
7182 for (i
= 0; i
< 16; i
++) {
7183 if ((insn
& (1 << i
)) == 0)
7185 if (insn
& (1 << 20)) {
7187 tmp
= gen_ld32(addr
, IS_USER(s
));
7191 store_reg(s
, i
, tmp
);
7195 tmp
= load_reg(s
, i
);
7196 gen_st32(tmp
, addr
, IS_USER(s
));
7198 tcg_gen_addi_i32(addr
, addr
, 4);
7200 if (insn
& (1 << 21)) {
7201 /* Base register writeback. */
7202 if (insn
& (1 << 24)) {
7203 tcg_gen_addi_i32(addr
, addr
, -offset
);
7205 /* Fault if writeback register is in register list. */
7206 if (insn
& (1 << rn
))
7208 store_reg(s
, rn
, addr
);
7215 case 5: /* Data processing register constant shift. */
7217 gen_op_movl_T0_im(0);
7219 gen_movl_T0_reg(s
, rn
);
7220 gen_movl_T1_reg(s
, rm
);
7221 op
= (insn
>> 21) & 0xf;
7222 shiftop
= (insn
>> 4) & 3;
7223 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7224 conds
= (insn
& (1 << 20)) != 0;
7225 logic_cc
= (conds
&& thumb2_logic_op(op
));
7226 gen_arm_shift_im(cpu_T
[1], shiftop
, shift
, logic_cc
);
7227 if (gen_thumb2_data_op(s
, op
, conds
, 0))
7230 gen_movl_reg_T0(s
, rd
);
7232 case 13: /* Misc data processing. */
7233 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7234 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7237 case 0: /* Register controlled shift. */
7238 tmp
= load_reg(s
, rn
);
7239 tmp2
= load_reg(s
, rm
);
7240 if ((insn
& 0x70) != 0)
7242 op
= (insn
>> 21) & 3;
7243 logic_cc
= (insn
& (1 << 20)) != 0;
7244 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7247 store_reg(s
, rd
, tmp
);
7249 case 1: /* Sign/zero extend. */
7250 tmp
= load_reg(s
, rm
);
7251 shift
= (insn
>> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
7255 tcg_gen_rori_i32(tmp
, tmp
, shift
* 8);
7256 op
= (insn
>> 20) & 7;
7258 case 0: gen_sxth(tmp
); break;
7259 case 1: gen_uxth(tmp
); break;
7260 case 2: gen_sxtb16(tmp
); break;
7261 case 3: gen_uxtb16(tmp
); break;
7262 case 4: gen_sxtb(tmp
); break;
7263 case 5: gen_uxtb(tmp
); break;
7264 default: goto illegal_op
;
7267 tmp2
= load_reg(s
, rn
);
7268 if ((op
>> 1) == 1) {
7269 gen_add16(tmp
, tmp2
);
7271 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7275 store_reg(s
, rd
, tmp
);
7277 case 2: /* SIMD add/subtract. */
7278 op
= (insn
>> 20) & 7;
7279 shift
= (insn
>> 4) & 7;
7280 if ((op
& 3) == 3 || (shift
& 3) == 3)
7282 tmp
= load_reg(s
, rn
);
7283 tmp2
= load_reg(s
, rm
);
7284 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7286 store_reg(s
, rd
, tmp
);
7288 case 3: /* Other data processing. */
7289 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7291 /* Saturating add/subtract. */
7292 tmp
= load_reg(s
, rn
);
7293 tmp2
= load_reg(s
, rm
);
7295 gen_helper_double_saturate(tmp
, tmp
);
7297 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7299 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7302 tmp
= load_reg(s
, rn
);
7304 case 0x0a: /* rbit */
7305 gen_helper_rbit(tmp
, tmp
);
7307 case 0x08: /* rev */
7308 tcg_gen_bswap_i32(tmp
, tmp
);
7310 case 0x09: /* rev16 */
7313 case 0x0b: /* revsh */
7316 case 0x10: /* sel */
7317 tmp2
= load_reg(s
, rm
);
7319 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7320 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7324 case 0x18: /* clz */
7325 gen_helper_clz(tmp
, tmp
);
7331 store_reg(s
, rd
, tmp
);
7333 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7334 op
= (insn
>> 4) & 0xf;
7335 tmp
= load_reg(s
, rn
);
7336 tmp2
= load_reg(s
, rm
);
7337 switch ((insn
>> 20) & 7) {
7338 case 0: /* 32 x 32 -> 32 */
7339 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7342 tmp2
= load_reg(s
, rs
);
7344 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7346 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7350 case 1: /* 16 x 16 -> 32 */
7351 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7354 tmp2
= load_reg(s
, rs
);
7355 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7359 case 2: /* Dual multiply add. */
7360 case 4: /* Dual multiply subtract. */
7362 gen_swap_half(tmp2
);
7363 gen_smul_dual(tmp
, tmp2
);
7364 /* This addition cannot overflow. */
7365 if (insn
& (1 << 22)) {
7366 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7368 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7373 tmp2
= load_reg(s
, rs
);
7374 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7378 case 3: /* 32 * 16 -> 32msb */
7380 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7383 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7384 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7386 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7389 tmp2
= load_reg(s
, rs
);
7390 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7394 case 5: case 6: /* 32 * 32 -> 32msb */
7395 gen_imull(tmp
, tmp2
);
7396 if (insn
& (1 << 5)) {
7397 gen_roundqd(tmp
, tmp2
);
7404 tmp2
= load_reg(s
, rs
);
7405 if (insn
& (1 << 21)) {
7406 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7408 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7413 case 7: /* Unsigned sum of absolute differences. */
7414 gen_helper_usad8(tmp
, tmp
, tmp2
);
7417 tmp2
= load_reg(s
, rs
);
7418 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7423 store_reg(s
, rd
, tmp
);
7425 case 6: case 7: /* 64-bit multiply, Divide. */
7426 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7427 tmp
= load_reg(s
, rn
);
7428 tmp2
= load_reg(s
, rm
);
7429 if ((op
& 0x50) == 0x10) {
7431 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7434 gen_helper_udiv(tmp
, tmp
, tmp2
);
7436 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7438 store_reg(s
, rd
, tmp
);
7439 } else if ((op
& 0xe) == 0xc) {
7440 /* Dual multiply accumulate long. */
7442 gen_swap_half(tmp2
);
7443 gen_smul_dual(tmp
, tmp2
);
7445 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7447 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7451 tmp64
= tcg_temp_new_i64();
7452 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7454 gen_addq(s
, tmp64
, rs
, rd
);
7455 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7458 /* Unsigned 64-bit multiply */
7459 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7463 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7465 tmp64
= tcg_temp_new_i64();
7466 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7469 /* Signed 64-bit multiply */
7470 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7475 gen_addq_lo(s
, tmp64
, rs
);
7476 gen_addq_lo(s
, tmp64
, rd
);
7477 } else if (op
& 0x40) {
7478 /* 64-bit accumulate. */
7479 gen_addq(s
, tmp64
, rs
, rd
);
7481 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7486 case 6: case 7: case 14: case 15:
7488 if (((insn
>> 24) & 3) == 3) {
7489 /* Translate into the equivalent ARM encoding. */
7490 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
7491 if (disas_neon_data_insn(env
, s
, insn
))
7494 if (insn
& (1 << 28))
7496 if (disas_coproc_insn (env
, s
, insn
))
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
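                /* Added note, not in the original source: the two XOR lines
                   above recover the architected I1/I2 offset bits.  hw2 bit 13
                   carries J1 and bit 11 carries J2, with I1 = NOT(J1 XOR S)
                   and I2 = NOT(J2 XOR S).  Since the arithmetic shift already
                   copied the sign bit S into offset[23:22], XOR-ing the
                   complemented J bits into those positions produces the final
                   offset bits.  */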
7515 if (insn
& (1 << 14)) {
7516 /* Branch and link. */
7517 gen_op_movl_T1_im(s
->pc
| 1);
7518 gen_movl_reg_T1(s
, 14);
7522 if (insn
& (1 << 12)) {
7527 offset
&= ~(uint32_t)2;
7528 gen_bx_im(s
, offset
);
7530 } else if (((insn
>> 23) & 7) == 7) {
7532 if (insn
& (1 << 13))
7535 if (insn
& (1 << 26)) {
7536 /* Secure monitor call (v6Z) */
7537 goto illegal_op
; /* not implemented. */
7539 op
= (insn
>> 20) & 7;
7541 case 0: /* msr cpsr. */
7543 tmp
= load_reg(s
, rn
);
7544 addr
= tcg_const_i32(insn
& 0xff);
7545 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
7550 case 1: /* msr spsr. */
7553 gen_movl_T0_reg(s
, rn
);
7554 if (gen_set_psr_T0(s
,
7555 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
7559 case 2: /* cps, nop-hint. */
7560 if (((insn
>> 8) & 7) == 0) {
7561 gen_nop_hint(s
, insn
& 0xff);
7563 /* Implemented as NOP in user mode. */
7568 if (insn
& (1 << 10)) {
7569 if (insn
& (1 << 7))
7571 if (insn
& (1 << 6))
7573 if (insn
& (1 << 5))
7575 if (insn
& (1 << 9))
7576 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
7578 if (insn
& (1 << 8)) {
7580 imm
|= (insn
& 0x1f);
7583 gen_op_movl_T0_im(imm
);
7584 gen_set_psr_T0(s
, offset
, 0);
7587 case 3: /* Special control operations. */
7588 op
= (insn
>> 4) & 0xf;
7591 gen_helper_clrex(cpu_env
);
7596 /* These execute as NOPs. */
7604 /* Trivial implementation equivalent to bx. */
7605 tmp
= load_reg(s
, rn
);
7608 case 5: /* Exception return. */
7609 /* Unpredictable in user mode. */
7611 case 6: /* mrs cpsr. */
7614 addr
= tcg_const_i32(insn
& 0xff);
7615 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
7617 gen_helper_cpsr_read(tmp
);
7619 store_reg(s
, rd
, tmp
);
7621 case 7: /* mrs spsr. */
7622 /* Not accessible in user mode. */
7623 if (IS_USER(s
) || IS_M(env
))
7625 tmp
= load_cpu_field(spsr
);
7626 store_reg(s
, rd
, tmp
);
7631 /* Conditional branch. */
7632 op
= (insn
>> 22) & 0xf;
7633 /* Generate a conditional jump to next instruction. */
7634 s
->condlabel
= gen_new_label();
7635 gen_test_cc(op
^ 1, s
->condlabel
);
7638 /* offset[11:1] = insn[10:0] */
7639 offset
= (insn
& 0x7ff) << 1;
7640 /* offset[17:12] = insn[21:16]. */
7641 offset
|= (insn
& 0x003f0000) >> 4;
7642 /* offset[31:20] = insn[26]. */
7643 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
7644 /* offset[18] = insn[13]. */
7645 offset
|= (insn
& (1 << 13)) << 5;
7646 /* offset[19] = insn[11]. */
7647 offset
|= (insn
& (1 << 11)) << 8;
7649 /* jump to the offset */
7650 gen_jmp(s
, s
->pc
+ offset
);
7653 /* Data processing immediate. */
7654 if (insn
& (1 << 25)) {
7655 if (insn
& (1 << 24)) {
7656 if (insn
& (1 << 20))
7658 /* Bitfield/Saturate. */
7659 op
= (insn
>> 21) & 7;
7661 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7664 tcg_gen_movi_i32(tmp
, 0);
7666 tmp
= load_reg(s
, rn
);
7669 case 2: /* Signed bitfield extract. */
7671 if (shift
+ imm
> 32)
7674 gen_sbfx(tmp
, shift
, imm
);
7676 case 6: /* Unsigned bitfield extract. */
7678 if (shift
+ imm
> 32)
7681 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
7683 case 3: /* Bitfield insert/clear. */
7686 imm
= imm
+ 1 - shift
;
7688 tmp2
= load_reg(s
, rd
);
7689 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
7695 default: /* Saturate. */
7698 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7700 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7702 tmp2
= tcg_const_i32(imm
);
7705 if ((op
& 1) && shift
== 0)
7706 gen_helper_usat16(tmp
, tmp
, tmp2
);
7708 gen_helper_usat(tmp
, tmp
, tmp2
);
7711 if ((op
& 1) && shift
== 0)
7712 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7714 gen_helper_ssat(tmp
, tmp
, tmp2
);
7718 store_reg(s
, rd
, tmp
);
7720 imm
= ((insn
& 0x04000000) >> 15)
7721 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
7722 if (insn
& (1 << 22)) {
7723 /* 16-bit immediate. */
7724 imm
|= (insn
>> 4) & 0xf000;
7725 if (insn
& (1 << 23)) {
7727 tmp
= load_reg(s
, rd
);
7728 tcg_gen_ext16u_i32(tmp
, tmp
);
7729 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
7733 tcg_gen_movi_i32(tmp
, imm
);
7736 /* Add/sub 12-bit immediate. */
7738 offset
= s
->pc
& ~(uint32_t)3;
7739 if (insn
& (1 << 23))
7744 tcg_gen_movi_i32(tmp
, offset
);
7746 tmp
= load_reg(s
, rn
);
7747 if (insn
& (1 << 23))
7748 tcg_gen_subi_i32(tmp
, tmp
, imm
);
7750 tcg_gen_addi_i32(tmp
, tmp
, imm
);
7753 store_reg(s
, rd
, tmp
);
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
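                /* Worked example, added for illustration: with XY = 0xab,
                   encoding 1 yields 0x00ab00ab, encoding 2 yields 0xab00ab00
                   and encoding 3 yields 0xabababab.  The rotated form forces
                   the top bit of the 8-bit value to 1 and rotates it into
                   place using the shift count assembled above.  */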
7782 gen_op_movl_T1_im(imm
);
7783 rn
= (insn
>> 16) & 0xf;
7785 gen_op_movl_T0_im(0);
7787 gen_movl_T0_reg(s
, rn
);
7788 op
= (insn
>> 21) & 0xf;
7789 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
7792 rd
= (insn
>> 8) & 0xf;
7794 gen_movl_reg_T0(s
, rd
);
7799 case 12: /* Load/store single data item. */
7804 if ((insn
& 0x01100000) == 0x01000000) {
7805 if (disas_neon_ls_insn(env
, s
, insn
))
7813 /* s->pc has already been incremented by 4. */
7814 imm
= s
->pc
& 0xfffffffc;
7815 if (insn
& (1 << 23))
7816 imm
+= insn
& 0xfff;
7818 imm
-= insn
& 0xfff;
7819 tcg_gen_movi_i32(addr
, imm
);
7821 addr
= load_reg(s
, rn
);
7822 if (insn
& (1 << 23)) {
7823 /* Positive offset. */
7825 tcg_gen_addi_i32(addr
, addr
, imm
);
7827 op
= (insn
>> 8) & 7;
7830 case 0: case 8: /* Shifted Register. */
7831 shift
= (insn
>> 4) & 0xf;
7834 tmp
= load_reg(s
, rm
);
7836 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7837 tcg_gen_add_i32(addr
, addr
, tmp
);
7840 case 4: /* Negative offset. */
7841 tcg_gen_addi_i32(addr
, addr
, -imm
);
7843 case 6: /* User privilege. */
7844 tcg_gen_addi_i32(addr
, addr
, imm
);
7847 case 1: /* Post-decrement. */
7850 case 3: /* Post-increment. */
7854 case 5: /* Pre-decrement. */
7857 case 7: /* Pre-increment. */
7858 tcg_gen_addi_i32(addr
, addr
, imm
);
7866 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
7867 if (insn
& (1 << 20)) {
7869 if (rs
== 15 && op
!= 2) {
7872 /* Memory hint. Implemented as NOP. */
7875 case 0: tmp
= gen_ld8u(addr
, user
); break;
7876 case 4: tmp
= gen_ld8s(addr
, user
); break;
7877 case 1: tmp
= gen_ld16u(addr
, user
); break;
7878 case 5: tmp
= gen_ld16s(addr
, user
); break;
7879 case 2: tmp
= gen_ld32(addr
, user
); break;
7880 default: goto illegal_op
;
7885 store_reg(s
, rs
, tmp
);
7892 tmp
= load_reg(s
, rs
);
7894 case 0: gen_st8(tmp
, addr
, user
); break;
7895 case 1: gen_st16(tmp
, addr
, user
); break;
7896 case 2: gen_st32(tmp
, addr
, user
); break;
7897 default: goto illegal_op
;
7901 tcg_gen_addi_i32(addr
, addr
, imm
);
7903 store_reg(s
, rn
, addr
);
7917 static void disas_thumb_insn(CPUState
*env
, DisasContext
*s
)
7919 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
7926 if (s
->condexec_mask
) {
7927 cond
= s
->condexec_cond
;
7928 s
->condlabel
= gen_new_label();
7929 gen_test_cc(cond
^ 1, s
->condlabel
);
7933 insn
= lduw_code(s
->pc
);
7936 switch (insn
>> 12) {
7939 op
= (insn
>> 11) & 3;
7942 rn
= (insn
>> 3) & 7;
7943 gen_movl_T0_reg(s
, rn
);
7944 if (insn
& (1 << 10)) {
7946 gen_op_movl_T1_im((insn
>> 6) & 7);
7949 rm
= (insn
>> 6) & 7;
7950 gen_movl_T1_reg(s
, rm
);
7952 if (insn
& (1 << 9)) {
7953 if (s
->condexec_mask
)
7954 gen_op_subl_T0_T1();
7956 gen_op_subl_T0_T1_cc();
7958 if (s
->condexec_mask
)
7959 gen_op_addl_T0_T1();
7961 gen_op_addl_T0_T1_cc();
7963 gen_movl_reg_T0(s
, rd
);
7965 /* shift immediate */
7966 rm
= (insn
>> 3) & 7;
7967 shift
= (insn
>> 6) & 0x1f;
7968 tmp
= load_reg(s
, rm
);
7969 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
7970 if (!s
->condexec_mask
)
7972 store_reg(s
, rd
, tmp
);
7976 /* arithmetic large immediate */
7977 op
= (insn
>> 11) & 3;
7978 rd
= (insn
>> 8) & 0x7;
7980 gen_op_movl_T0_im(insn
& 0xff);
7982 gen_movl_T0_reg(s
, rd
);
7983 gen_op_movl_T1_im(insn
& 0xff);
7987 if (!s
->condexec_mask
)
7988 gen_op_logic_T0_cc();
7991 gen_op_subl_T0_T1_cc();
7994 if (s
->condexec_mask
)
7995 gen_op_addl_T0_T1();
7997 gen_op_addl_T0_T1_cc();
8000 if (s
->condexec_mask
)
8001 gen_op_subl_T0_T1();
8003 gen_op_subl_T0_T1_cc();
8007 gen_movl_reg_T0(s
, rd
);
8010 if (insn
& (1 << 11)) {
8011 rd
= (insn
>> 8) & 7;
8012 /* load pc-relative. Bit 1 of PC is ignored. */
8013 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
8014 val
&= ~(uint32_t)2;
8016 tcg_gen_movi_i32(addr
, val
);
8017 tmp
= gen_ld32(addr
, IS_USER(s
));
8019 store_reg(s
, rd
, tmp
);
8022 if (insn
& (1 << 10)) {
8023 /* data processing extended or blx */
8024 rd
= (insn
& 7) | ((insn
>> 4) & 8);
8025 rm
= (insn
>> 3) & 0xf;
8026 op
= (insn
>> 8) & 3;
8029 gen_movl_T0_reg(s
, rd
);
8030 gen_movl_T1_reg(s
, rm
);
8031 gen_op_addl_T0_T1();
8032 gen_movl_reg_T0(s
, rd
);
8035 gen_movl_T0_reg(s
, rd
);
8036 gen_movl_T1_reg(s
, rm
);
8037 gen_op_subl_T0_T1_cc();
8039 case 2: /* mov/cpy */
8040 gen_movl_T0_reg(s
, rm
);
8041 gen_movl_reg_T0(s
, rd
);
8043 case 3:/* branch [and link] exchange thumb register */
8044 tmp
= load_reg(s
, rm
);
8045 if (insn
& (1 << 7)) {
8046 val
= (uint32_t)s
->pc
| 1;
8048 tcg_gen_movi_i32(tmp2
, val
);
8049 store_reg(s
, 14, tmp2
);
8057 /* data processing register */
8059 rm
= (insn
>> 3) & 7;
8060 op
= (insn
>> 6) & 0xf;
8061 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
8062 /* the shift/rotate ops want the operands backwards */
8071 if (op
== 9) /* neg */
8072 gen_op_movl_T0_im(0);
8073 else if (op
!= 0xf) /* mvn doesn't read its first operand */
8074 gen_movl_T0_reg(s
, rd
);
8076 gen_movl_T1_reg(s
, rm
);
8079 gen_op_andl_T0_T1();
8080 if (!s
->condexec_mask
)
8081 gen_op_logic_T0_cc();
8084 gen_op_xorl_T0_T1();
8085 if (!s
->condexec_mask
)
8086 gen_op_logic_T0_cc();
8089 if (s
->condexec_mask
) {
8090 gen_helper_shl(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8092 gen_helper_shl_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8093 gen_op_logic_T1_cc();
8097 if (s
->condexec_mask
) {
8098 gen_helper_shr(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8100 gen_helper_shr_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8101 gen_op_logic_T1_cc();
8105 if (s
->condexec_mask
) {
8106 gen_helper_sar(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8108 gen_helper_sar_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8109 gen_op_logic_T1_cc();
8113 if (s
->condexec_mask
)
8116 gen_op_adcl_T0_T1_cc();
8119 if (s
->condexec_mask
)
8122 gen_op_sbcl_T0_T1_cc();
8125 if (s
->condexec_mask
) {
8126 gen_helper_ror(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8128 gen_helper_ror_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8129 gen_op_logic_T1_cc();
8133 gen_op_andl_T0_T1();
8134 gen_op_logic_T0_cc();
8138 if (s
->condexec_mask
)
8139 tcg_gen_neg_i32(cpu_T
[0], cpu_T
[1]);
8141 gen_op_subl_T0_T1_cc();
8144 gen_op_subl_T0_T1_cc();
8148 gen_op_addl_T0_T1_cc();
8153 if (!s
->condexec_mask
)
8154 gen_op_logic_T0_cc();
8157 gen_op_mull_T0_T1();
8158 if (!s
->condexec_mask
)
8159 gen_op_logic_T0_cc();
8162 gen_op_bicl_T0_T1();
8163 if (!s
->condexec_mask
)
8164 gen_op_logic_T0_cc();
8168 if (!s
->condexec_mask
)
8169 gen_op_logic_T1_cc();
8176 gen_movl_reg_T1(s
, rm
);
8178 gen_movl_reg_T0(s
, rd
);
8183 /* load/store register offset. */
8185 rn
= (insn
>> 3) & 7;
8186 rm
= (insn
>> 6) & 7;
8187 op
= (insn
>> 9) & 7;
8188 addr
= load_reg(s
, rn
);
8189 tmp
= load_reg(s
, rm
);
8190 tcg_gen_add_i32(addr
, addr
, tmp
);
8193 if (op
< 3) /* store */
8194 tmp
= load_reg(s
, rd
);
8198 gen_st32(tmp
, addr
, IS_USER(s
));
8201 gen_st16(tmp
, addr
, IS_USER(s
));
8204 gen_st8(tmp
, addr
, IS_USER(s
));
8207 tmp
= gen_ld8s(addr
, IS_USER(s
));
8210 tmp
= gen_ld32(addr
, IS_USER(s
));
8213 tmp
= gen_ld16u(addr
, IS_USER(s
));
8216 tmp
= gen_ld8u(addr
, IS_USER(s
));
8219 tmp
= gen_ld16s(addr
, IS_USER(s
));
8222 if (op
>= 3) /* load */
8223 store_reg(s
, rd
, tmp
);
8228 /* load/store word immediate offset */
8230 rn
= (insn
>> 3) & 7;
8231 addr
= load_reg(s
, rn
);
8232 val
= (insn
>> 4) & 0x7c;
8233 tcg_gen_addi_i32(addr
, addr
, val
);
8235 if (insn
& (1 << 11)) {
8237 tmp
= gen_ld32(addr
, IS_USER(s
));
8238 store_reg(s
, rd
, tmp
);
8241 tmp
= load_reg(s
, rd
);
8242 gen_st32(tmp
, addr
, IS_USER(s
));
8248 /* load/store byte immediate offset */
8250 rn
= (insn
>> 3) & 7;
8251 addr
= load_reg(s
, rn
);
8252 val
= (insn
>> 6) & 0x1f;
8253 tcg_gen_addi_i32(addr
, addr
, val
);
8255 if (insn
& (1 << 11)) {
8257 tmp
= gen_ld8u(addr
, IS_USER(s
));
8258 store_reg(s
, rd
, tmp
);
8261 tmp
= load_reg(s
, rd
);
8262 gen_st8(tmp
, addr
, IS_USER(s
));
8268 /* load/store halfword immediate offset */
8270 rn
= (insn
>> 3) & 7;
8271 addr
= load_reg(s
, rn
);
8272 val
= (insn
>> 5) & 0x3e;
8273 tcg_gen_addi_i32(addr
, addr
, val
);
8275 if (insn
& (1 << 11)) {
8277 tmp
= gen_ld16u(addr
, IS_USER(s
));
8278 store_reg(s
, rd
, tmp
);
8281 tmp
= load_reg(s
, rd
);
8282 gen_st16(tmp
, addr
, IS_USER(s
));
8288 /* load/store from stack */
8289 rd
= (insn
>> 8) & 7;
8290 addr
= load_reg(s
, 13);
8291 val
= (insn
& 0xff) * 4;
8292 tcg_gen_addi_i32(addr
, addr
, val
);
8294 if (insn
& (1 << 11)) {
8296 tmp
= gen_ld32(addr
, IS_USER(s
));
8297 store_reg(s
, rd
, tmp
);
8300 tmp
= load_reg(s
, rd
);
8301 gen_st32(tmp
, addr
, IS_USER(s
));
8307 /* add to high reg */
8308 rd
= (insn
>> 8) & 7;
8309 if (insn
& (1 << 11)) {
8311 tmp
= load_reg(s
, 13);
8313 /* PC. bit 1 is ignored. */
8315 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
8317 val
= (insn
& 0xff) * 4;
8318 tcg_gen_addi_i32(tmp
, tmp
, val
);
8319 store_reg(s
, rd
, tmp
);
8324 op
= (insn
>> 8) & 0xf;
8327 /* adjust stack pointer */
8328 tmp
= load_reg(s
, 13);
8329 val
= (insn
& 0x7f) * 4;
8330 if (insn
& (1 << 7))
8331 val
= -(int32_t)val
;
8332 tcg_gen_addi_i32(tmp
, tmp
, val
);
8333 store_reg(s
, 13, tmp
);
8336 case 2: /* sign/zero extend. */
8339 rm
= (insn
>> 3) & 7;
8340 tmp
= load_reg(s
, rm
);
8341 switch ((insn
>> 6) & 3) {
8342 case 0: gen_sxth(tmp
); break;
8343 case 1: gen_sxtb(tmp
); break;
8344 case 2: gen_uxth(tmp
); break;
8345 case 3: gen_uxtb(tmp
); break;
8347 store_reg(s
, rd
, tmp
);
8349 case 4: case 5: case 0xc: case 0xd:
8351 addr
= load_reg(s
, 13);
8352 if (insn
& (1 << 8))
8356 for (i
= 0; i
< 8; i
++) {
8357 if (insn
& (1 << i
))
8360 if ((insn
& (1 << 11)) == 0) {
8361 tcg_gen_addi_i32(addr
, addr
, -offset
);
8363 for (i
= 0; i
< 8; i
++) {
8364 if (insn
& (1 << i
)) {
8365 if (insn
& (1 << 11)) {
8367 tmp
= gen_ld32(addr
, IS_USER(s
));
8368 store_reg(s
, i
, tmp
);
8371 tmp
= load_reg(s
, i
);
8372 gen_st32(tmp
, addr
, IS_USER(s
));
8374 /* advance to the next address. */
8375 tcg_gen_addi_i32(addr
, addr
, 4);
8379 if (insn
& (1 << 8)) {
8380 if (insn
& (1 << 11)) {
8382 tmp
= gen_ld32(addr
, IS_USER(s
));
8383 /* don't set the pc until the rest of the instruction
8387 tmp
= load_reg(s
, 14);
8388 gen_st32(tmp
, addr
, IS_USER(s
));
8390 tcg_gen_addi_i32(addr
, addr
, 4);
8392 if ((insn
& (1 << 11)) == 0) {
8393 tcg_gen_addi_i32(addr
, addr
, -offset
);
8395 /* write back the new stack pointer */
8396 store_reg(s
, 13, addr
);
8397 /* set the new PC value */
8398 if ((insn
& 0x0900) == 0x0900)
8402 case 1: case 3: case 9: case 11: /* czb */
8404 tmp
= load_reg(s
, rm
);
8405 s
->condlabel
= gen_new_label();
8407 if (insn
& (1 << 11))
8408 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
8410 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
8412 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
8413 val
= (uint32_t)s
->pc
+ 2;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
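            /* Added note: condexec_cond holds the IT base condition and
               condexec_mask the remaining then/else pattern.  The main
               translation loop consumes one mask bit per following
               instruction (see the shift of dc->condexec_mask further down),
               so the state written here conditionalises up to four
               subsequent instructions.  */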
8429 case 0xe: /* bkpt */
8430 gen_set_condexec(s
);
8431 gen_set_pc_im(s
->pc
- 2);
8432 gen_exception(EXCP_BKPT
);
8433 s
->is_jmp
= DISAS_JUMP
;
8438 rn
= (insn
>> 3) & 0x7;
8440 tmp
= load_reg(s
, rn
);
8441 switch ((insn
>> 6) & 3) {
8442 case 0: tcg_gen_bswap_i32(tmp
, tmp
); break;
8443 case 1: gen_rev16(tmp
); break;
8444 case 3: gen_revsh(tmp
); break;
8445 default: goto illegal_op
;
8447 store_reg(s
, rd
, tmp
);
8455 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
8458 addr
= tcg_const_i32(16);
8459 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8463 addr
= tcg_const_i32(17);
8464 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8468 if (insn
& (1 << 4))
8469 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
8473 val
= ((insn
& 7) << 6) & shift
;
8474 gen_op_movl_T0_im(val
);
8475 gen_set_psr_T0(s
, shift
, 0);
8485 /* load/store multiple */
8486 rn
= (insn
>> 8) & 0x7;
8487 addr
= load_reg(s
, rn
);
8488 for (i
= 0; i
< 8; i
++) {
8489 if (insn
& (1 << i
)) {
8490 if (insn
& (1 << 11)) {
8492 tmp
= gen_ld32(addr
, IS_USER(s
));
8493 store_reg(s
, i
, tmp
);
8496 tmp
= load_reg(s
, i
);
8497 gen_st32(tmp
, addr
, IS_USER(s
));
8499 /* advance to the next address */
8500 tcg_gen_addi_i32(addr
, addr
, 4);
8503 /* Base register writeback. */
8504 if ((insn
& (1 << rn
)) == 0) {
8505 store_reg(s
, rn
, addr
);
8512 /* conditional branch or swi */
8513 cond
= (insn
>> 8) & 0xf;
8519 gen_set_condexec(s
);
8520 gen_set_pc_im(s
->pc
);
8521 s
->is_jmp
= DISAS_SWI
;
8524 /* generate a conditional jump to next instruction */
8525 s
->condlabel
= gen_new_label();
8526 gen_test_cc(cond
^ 1, s
->condlabel
);
8528 gen_movl_T1_reg(s
, 15);
8530 /* jump to the offset */
8531 val
= (uint32_t)s
->pc
+ 2;
8532 offset
= ((int32_t)insn
<< 24) >> 24;
8538 if (insn
& (1 << 11)) {
8539 if (disas_thumb2_insn(env
, s
, insn
))
8543 /* unconditional branch */
8544 val
= (uint32_t)s
->pc
;
8545 offset
= ((int32_t)insn
<< 21) >> 21;
8546 val
+= (offset
<< 1) + 2;
8551 if (disas_thumb2_insn(env
, s
, insn
))
8557 gen_set_condexec(s
);
8558 gen_set_pc_im(s
->pc
- 4);
8559 gen_exception(EXCP_UDEF
);
8560 s
->is_jmp
= DISAS_JUMP
;
8564 gen_set_condexec(s
);
8565 gen_set_pc_im(s
->pc
- 2);
8566 gen_exception(EXCP_UDEF
);
8567 s
->is_jmp
= DISAS_JUMP
;
8570 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8571 basic block 'tb'. If search_pc is TRUE, also generate PC
8572 information for each intermediate instruction. */
8573 static inline void gen_intermediate_code_internal(CPUState
*env
,
8574 TranslationBlock
*tb
,
8577 DisasContext dc1
, *dc
= &dc1
;
8579 uint16_t *gen_opc_end
;
8581 target_ulong pc_start
;
8582 uint32_t next_page_start
;
8586 /* generate intermediate code */
8588 memset(temps
, 0, sizeof(temps
));
8594 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
8596 dc
->is_jmp
= DISAS_NEXT
;
8598 dc
->singlestep_enabled
= env
->singlestep_enabled
;
8600 dc
->thumb
= env
->thumb
;
8601 dc
->condexec_mask
= (env
->condexec_bits
& 0xf) << 1;
8602 dc
->condexec_cond
= env
->condexec_bits
>> 4;
8603 #if !defined(CONFIG_USER_ONLY)
8605 dc
->user
= ((env
->v7m
.exception
== 0) && (env
->v7m
.control
& 1));
8607 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
8610 cpu_F0s
= tcg_temp_new_i32();
8611 cpu_F1s
= tcg_temp_new_i32();
8612 cpu_F0d
= tcg_temp_new_i64();
8613 cpu_F1d
= tcg_temp_new_i64();
8616 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8617 cpu_M0
= tcg_temp_new_i64();
8618 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
8621 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
8623 max_insns
= CF_COUNT_MASK
;
8626 /* Reset the conditional execution bits immediately. This avoids
8627 complications trying to do it at the end of the block. */
8628 if (env
->condexec_bits
)
8630 TCGv tmp
= new_tmp();
8631 tcg_gen_movi_i32(tmp
, 0);
8632 store_cpu_field(tmp
, condexec_bits
);
8635 #ifdef CONFIG_USER_ONLY
8636 /* Intercept jump to the magic kernel page. */
8637 if (dc
->pc
>= 0xffff0000) {
8638 /* We always get here via a jump, so know we are not in a
8639 conditional execution block. */
8640 gen_exception(EXCP_KERNEL_TRAP
);
8641 dc
->is_jmp
= DISAS_UPDATE
;
8645 if (dc
->pc
>= 0xfffffff0 && IS_M(env
)) {
8646 /* We always get here via a jump, so know we are not in a
8647 conditional execution block. */
8648 gen_exception(EXCP_EXCEPTION_EXIT
);
8649 dc
->is_jmp
= DISAS_UPDATE
;
8654 if (unlikely(!TAILQ_EMPTY(&env
->breakpoints
))) {
8655 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
8656 if (bp
->pc
== dc
->pc
) {
8657 gen_set_condexec(dc
);
8658 gen_set_pc_im(dc
->pc
);
8659 gen_exception(EXCP_DEBUG
);
8660 dc
->is_jmp
= DISAS_JUMP
;
8661 /* Advance PC so that clearing the breakpoint will
8662 invalidate this TB. */
8664 goto done_generating
;
8670 j
= gen_opc_ptr
- gen_opc_buf
;
8674 gen_opc_instr_start
[lj
++] = 0;
8676 gen_opc_pc
[lj
] = dc
->pc
;
8677 gen_opc_instr_start
[lj
] = 1;
8678 gen_opc_icount
[lj
] = num_insns
;

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
        fprintf(logfile, "\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
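
/* Public entry points: the search-pc variant additionally records, for each
   generated op, which guest instruction it belongs to.  */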
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
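
/* Mode names for cpu_dump_state(), indexed by the low four bits of the CPSR
   mode field; "???" marks encodings the architecture does not define.  */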
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
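    /* Bit 4 of the mode field distinguishes the 32-bit modes from the legacy
       26-bit modes, hence the 32/26 suffix printed above.  */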

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
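
/* Restores the guest PC after a fault inside a translated block: the
   search-pc pass (gen_intermediate_code_pc) fills gen_opc_pc[], and the PC of
   the faulting instruction is read back from it here.  */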
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}