/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
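
/* Temporaries allocated while translating a single instruction are
   recorded in the DisasContext so they can be released when the
   instruction is finished; the small fixed-size t32[]/ttl[] arrays
   bound how many such temporaries one instruction may request. */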
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
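
/* Single-precision registers are packed two per TCGv_i64 element of
   cpu_fpr[]: an even-numbered %f register occupies the upper 32 bits
   of its double word, the following odd register the lower 32 bits.
   The helpers below hide that packing from the rest of the translator. */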
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    return cpu_fpr[src / 2];
}
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
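
/* %g0 always reads as zero and writes to it are discarded, so the GPR
   helpers below hand back a fresh zero temporary when register 0 is
   read and silently drop stores to it. */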
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
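
/* Direct block chaining is only used when the jump target is known at
   translation time, we are not single-stepping, and (for the system
   emulator) both pc and npc stay on the same guest page as the current
   TB, so that page protection changes invalidate all of them together. */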
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
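
/* Condition codes are evaluated lazily: the operands and result of the
   last flag-setting operation are kept in cpu_cc_src/cpu_cc_src2/
   cpu_cc_dst together with a cc_op code, and helper_compute_psr turns
   them into architectural PSR bits only when they are consumed.  The
   two helpers below recover the 32-bit carry from that saved state. */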
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
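
/* MULScc implements one step of the SPARCv8 multiply-step algorithm:
   the low bit of %y selects whether src2 or zero is added, %y is
   shifted right with the old low bit of src1 inserted at the top, and
   src1 is shifted right with (N xor V) inserted at the top.  The add
   operands are left in the cc_* globals so the caller can have the
   flags computed from them. */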
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
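
/* Branch generation: a conditional branch on this delayed-branch
   architecture leaves two possible next-PC values.  When both targets
   are known statically the translator emits two chained TB exits
   (gen_branch2, gen_branch_a); otherwise the pair is parked in
   jump_pc[] and npc is marked JUMP_PC until the choice can be resolved
   with a movcond (gen_branch_n, gen_generic_branch). */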
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}
static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
1090 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1093 static int subcc_cond
[16] = {
1109 -1, /* no overflow */
1112 static int logic_cond
[16] = {
1114 TCG_COND_EQ
, /* eq: Z */
1115 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1116 TCG_COND_LT
, /* lt: N ^ V -> N */
1117 TCG_COND_EQ
, /* leu: C | Z -> Z */
1118 TCG_COND_NEVER
, /* ltu: C -> 0 */
1119 TCG_COND_LT
, /* neg: N */
1120 TCG_COND_NEVER
, /* vs: V -> 0 */
1122 TCG_COND_NE
, /* ne: !Z */
1123 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1124 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1125 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1126 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1127 TCG_COND_GE
, /* pos: !N */
1128 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1134 #ifdef TARGET_SPARC64
1144 switch (dc
->cc_op
) {
1146 cmp
->cond
= logic_cond
[cond
];
1148 cmp
->is_bool
= false;
1150 cmp
->c2
= tcg_const_tl(0);
1151 #ifdef TARGET_SPARC64
1154 cmp
->c1
= tcg_temp_new();
1155 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1160 cmp
->c1
= cpu_cc_dst
;
1167 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1168 goto do_compare_dst_0
;
1170 case 7: /* overflow */
1171 case 15: /* !overflow */
1175 cmp
->cond
= subcc_cond
[cond
];
1176 cmp
->is_bool
= false;
1177 #ifdef TARGET_SPARC64
1179 /* Note that sign-extension works for unsigned compares as
1180 long as both operands are sign-extended. */
1181 cmp
->g1
= cmp
->g2
= false;
1182 cmp
->c1
= tcg_temp_new();
1183 cmp
->c2
= tcg_temp_new();
1184 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1185 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1189 cmp
->g1
= cmp
->g2
= true;
1190 cmp
->c1
= cpu_cc_src
;
1191 cmp
->c2
= cpu_cc_src2
;
1198 gen_helper_compute_psr(cpu_env
);
1199 dc
->cc_op
= CC_OP_FLAGS
;
1203 /* We're going to generate a boolean result. */
1204 cmp
->cond
= TCG_COND_NE
;
1205 cmp
->is_bool
= true;
1206 cmp
->g1
= cmp
->g2
= false;
1207 cmp
->c1
= r_dst
= tcg_temp_new();
1208 cmp
->c2
= tcg_const_tl(0);
1212 gen_op_eval_bn(r_dst
);
1215 gen_op_eval_be(r_dst
, r_src
);
1218 gen_op_eval_ble(r_dst
, r_src
);
1221 gen_op_eval_bl(r_dst
, r_src
);
1224 gen_op_eval_bleu(r_dst
, r_src
);
1227 gen_op_eval_bcs(r_dst
, r_src
);
1230 gen_op_eval_bneg(r_dst
, r_src
);
1233 gen_op_eval_bvs(r_dst
, r_src
);
1236 gen_op_eval_ba(r_dst
);
1239 gen_op_eval_bne(r_dst
, r_src
);
1242 gen_op_eval_bg(r_dst
, r_src
);
1245 gen_op_eval_bge(r_dst
, r_src
);
1248 gen_op_eval_bgu(r_dst
, r_src
);
1251 gen_op_eval_bcc(r_dst
, r_src
);
1254 gen_op_eval_bpos(r_dst
, r_src
);
1257 gen_op_eval_bvc(r_dst
, r_src
);
1264 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1266 unsigned int offset
;
1269 /* For now we still generate a straight boolean result. */
1270 cmp
->cond
= TCG_COND_NE
;
1271 cmp
->is_bool
= true;
1272 cmp
->g1
= cmp
->g2
= false;
1273 cmp
->c1
= r_dst
= tcg_temp_new();
1274 cmp
->c2
= tcg_const_tl(0);
1294 gen_op_eval_bn(r_dst
);
1297 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1300 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1303 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1306 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1309 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1312 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1315 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1318 gen_op_eval_ba(r_dst
);
1321 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1324 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1327 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1330 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1333 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1336 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1339 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1344 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1348 gen_compare(&cmp
, cc
, cond
, dc
);
1350 /* The interface is to return a boolean in r_dst. */
1352 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1354 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1360 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1363 gen_fcompare(&cmp
, cc
, cond
);
1365 /* The interface is to return a boolean in r_dst. */
1367 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1369 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1375 #ifdef TARGET_SPARC64
1377 static const int gen_tcg_cond_reg
[8] = {
1388 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1390 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1391 cmp
->is_bool
= false;
1395 cmp
->c2
= tcg_const_tl(0);
1398 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1401 gen_compare_reg(&cmp
, cond
, r_src
);
1403 /* The interface is to return a boolean in r_dst. */
1404 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1410 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1412 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1413 target_ulong target
= dc
->pc
+ offset
;
1415 #ifdef TARGET_SPARC64
1416 if (unlikely(AM_CHECK(dc
))) {
1417 target
&= 0xffffffffULL
;
1421 /* unconditional not taken */
1423 dc
->pc
= dc
->npc
+ 4;
1424 dc
->npc
= dc
->pc
+ 4;
1427 dc
->npc
= dc
->pc
+ 4;
1429 } else if (cond
== 0x8) {
1430 /* unconditional taken */
1433 dc
->npc
= dc
->pc
+ 4;
1437 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1441 gen_cond(cpu_cond
, cc
, cond
, dc
);
1443 gen_branch_a(dc
, target
);
1445 gen_branch_n(dc
, target
);
1450 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1452 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1453 target_ulong target
= dc
->pc
+ offset
;
1455 #ifdef TARGET_SPARC64
1456 if (unlikely(AM_CHECK(dc
))) {
1457 target
&= 0xffffffffULL
;
1461 /* unconditional not taken */
1463 dc
->pc
= dc
->npc
+ 4;
1464 dc
->npc
= dc
->pc
+ 4;
1467 dc
->npc
= dc
->pc
+ 4;
1469 } else if (cond
== 0x8) {
1470 /* unconditional taken */
1473 dc
->npc
= dc
->pc
+ 4;
1477 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1481 gen_fcond(cpu_cond
, cc
, cond
);
1483 gen_branch_a(dc
, target
);
1485 gen_branch_n(dc
, target
);
1490 #ifdef TARGET_SPARC64
1491 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1494 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1495 target_ulong target
= dc
->pc
+ offset
;
1497 if (unlikely(AM_CHECK(dc
))) {
1498 target
&= 0xffffffffULL
;
1501 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1503 gen_branch_a(dc
, target
);
1505 gen_branch_n(dc
, target
);
1509 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1513 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1516 gen_helper_fcmps_fcc1(cpu_env
, r_rs1
, r_rs2
);
1519 gen_helper_fcmps_fcc2(cpu_env
, r_rs1
, r_rs2
);
1522 gen_helper_fcmps_fcc3(cpu_env
, r_rs1
, r_rs2
);
1527 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1531 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1534 gen_helper_fcmpd_fcc1(cpu_env
, r_rs1
, r_rs2
);
1537 gen_helper_fcmpd_fcc2(cpu_env
, r_rs1
, r_rs2
);
1540 gen_helper_fcmpd_fcc3(cpu_env
, r_rs1
, r_rs2
);
1545 static inline void gen_op_fcmpq(int fccno
)
1549 gen_helper_fcmpq(cpu_env
);
1552 gen_helper_fcmpq_fcc1(cpu_env
);
1555 gen_helper_fcmpq_fcc2(cpu_env
);
1558 gen_helper_fcmpq_fcc3(cpu_env
);
1563 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1567 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1570 gen_helper_fcmpes_fcc1(cpu_env
, r_rs1
, r_rs2
);
1573 gen_helper_fcmpes_fcc2(cpu_env
, r_rs1
, r_rs2
);
1576 gen_helper_fcmpes_fcc3(cpu_env
, r_rs1
, r_rs2
);
1581 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1585 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1588 gen_helper_fcmped_fcc1(cpu_env
, r_rs1
, r_rs2
);
1591 gen_helper_fcmped_fcc2(cpu_env
, r_rs1
, r_rs2
);
1594 gen_helper_fcmped_fcc3(cpu_env
, r_rs1
, r_rs2
);
1599 static inline void gen_op_fcmpeq(int fccno
)
1603 gen_helper_fcmpeq(cpu_env
);
1606 gen_helper_fcmpeq_fcc1(cpu_env
);
1609 gen_helper_fcmpeq_fcc2(cpu_env
);
1612 gen_helper_fcmpeq_fcc3(cpu_env
);
1619 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1621 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1624 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1626 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1629 static inline void gen_op_fcmpq(int fccno
)
1631 gen_helper_fcmpq(cpu_env
);
1634 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1636 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1639 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1641 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1644 static inline void gen_op_fcmpeq(int fccno
)
1646 gen_helper_fcmpeq(cpu_env
);
1650 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1652 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1653 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1654 gen_exception(dc
, TT_FP_EXCP
);
1657 static int gen_trap_ifnofpu(DisasContext
*dc
)
1659 #if !defined(CONFIG_USER_ONLY)
1660 if (!dc
->fpu_enabled
) {
1661 gen_exception(dc
, TT_NFPU_INSN
);
1668 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1670 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1673 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1674 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1678 src
= gen_load_fpr_F(dc
, rs
);
1679 dst
= gen_dest_fpr_F(dc
);
1681 gen(dst
, cpu_env
, src
);
1683 gen_store_fpr_F(dc
, rd
, dst
);
1686 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1687 void (*gen
)(TCGv_i32
, TCGv_i32
))
1691 src
= gen_load_fpr_F(dc
, rs
);
1692 dst
= gen_dest_fpr_F(dc
);
1696 gen_store_fpr_F(dc
, rd
, dst
);
1699 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1700 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1702 TCGv_i32 dst
, src1
, src2
;
1704 src1
= gen_load_fpr_F(dc
, rs1
);
1705 src2
= gen_load_fpr_F(dc
, rs2
);
1706 dst
= gen_dest_fpr_F(dc
);
1708 gen(dst
, cpu_env
, src1
, src2
);
1710 gen_store_fpr_F(dc
, rd
, dst
);
1713 #ifdef TARGET_SPARC64
1714 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1715 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1717 TCGv_i32 dst
, src1
, src2
;
1719 src1
= gen_load_fpr_F(dc
, rs1
);
1720 src2
= gen_load_fpr_F(dc
, rs2
);
1721 dst
= gen_dest_fpr_F(dc
);
1723 gen(dst
, src1
, src2
);
1725 gen_store_fpr_F(dc
, rd
, dst
);
1729 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1730 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1734 src
= gen_load_fpr_D(dc
, rs
);
1735 dst
= gen_dest_fpr_D(dc
, rd
);
1737 gen(dst
, cpu_env
, src
);
1739 gen_store_fpr_D(dc
, rd
, dst
);
1742 #ifdef TARGET_SPARC64
1743 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1744 void (*gen
)(TCGv_i64
, TCGv_i64
))
1748 src
= gen_load_fpr_D(dc
, rs
);
1749 dst
= gen_dest_fpr_D(dc
, rd
);
1753 gen_store_fpr_D(dc
, rd
, dst
);
1757 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1758 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1760 TCGv_i64 dst
, src1
, src2
;
1762 src1
= gen_load_fpr_D(dc
, rs1
);
1763 src2
= gen_load_fpr_D(dc
, rs2
);
1764 dst
= gen_dest_fpr_D(dc
, rd
);
1766 gen(dst
, cpu_env
, src1
, src2
);
1768 gen_store_fpr_D(dc
, rd
, dst
);
1771 #ifdef TARGET_SPARC64
1772 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1773 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1775 TCGv_i64 dst
, src1
, src2
;
1777 src1
= gen_load_fpr_D(dc
, rs1
);
1778 src2
= gen_load_fpr_D(dc
, rs2
);
1779 dst
= gen_dest_fpr_D(dc
, rd
);
1781 gen(dst
, src1
, src2
);
1783 gen_store_fpr_D(dc
, rd
, dst
);
1786 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1787 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1789 TCGv_i64 dst
, src1
, src2
;
1791 src1
= gen_load_fpr_D(dc
, rs1
);
1792 src2
= gen_load_fpr_D(dc
, rs2
);
1793 dst
= gen_dest_fpr_D(dc
, rd
);
1795 gen(dst
, cpu_gsr
, src1
, src2
);
1797 gen_store_fpr_D(dc
, rd
, dst
);
1800 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1801 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1803 TCGv_i64 dst
, src0
, src1
, src2
;
1805 src1
= gen_load_fpr_D(dc
, rs1
);
1806 src2
= gen_load_fpr_D(dc
, rs2
);
1807 src0
= gen_load_fpr_D(dc
, rd
);
1808 dst
= gen_dest_fpr_D(dc
, rd
);
1810 gen(dst
, src0
, src1
, src2
);
1812 gen_store_fpr_D(dc
, rd
, dst
);
1816 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1817 void (*gen
)(TCGv_ptr
))
1819 gen_op_load_fpr_QT1(QFPREG(rs
));
1823 gen_op_store_QT0_fpr(QFPREG(rd
));
1824 gen_update_fprs_dirty(QFPREG(rd
));
1827 #ifdef TARGET_SPARC64
1828 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1829 void (*gen
)(TCGv_ptr
))
1831 gen_op_load_fpr_QT1(QFPREG(rs
));
1835 gen_op_store_QT0_fpr(QFPREG(rd
));
1836 gen_update_fprs_dirty(QFPREG(rd
));
1840 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1841 void (*gen
)(TCGv_ptr
))
1843 gen_op_load_fpr_QT0(QFPREG(rs1
));
1844 gen_op_load_fpr_QT1(QFPREG(rs2
));
1848 gen_op_store_QT0_fpr(QFPREG(rd
));
1849 gen_update_fprs_dirty(QFPREG(rd
));
1852 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1853 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1856 TCGv_i32 src1
, src2
;
1858 src1
= gen_load_fpr_F(dc
, rs1
);
1859 src2
= gen_load_fpr_F(dc
, rs2
);
1860 dst
= gen_dest_fpr_D(dc
, rd
);
1862 gen(dst
, cpu_env
, src1
, src2
);
1864 gen_store_fpr_D(dc
, rd
, dst
);
1867 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1868 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1870 TCGv_i64 src1
, src2
;
1872 src1
= gen_load_fpr_D(dc
, rs1
);
1873 src2
= gen_load_fpr_D(dc
, rs2
);
1875 gen(cpu_env
, src1
, src2
);
1877 gen_op_store_QT0_fpr(QFPREG(rd
));
1878 gen_update_fprs_dirty(QFPREG(rd
));
1881 #ifdef TARGET_SPARC64
1882 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1883 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1888 src
= gen_load_fpr_F(dc
, rs
);
1889 dst
= gen_dest_fpr_D(dc
, rd
);
1891 gen(dst
, cpu_env
, src
);
1893 gen_store_fpr_D(dc
, rd
, dst
);
1897 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1898 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1903 src
= gen_load_fpr_F(dc
, rs
);
1904 dst
= gen_dest_fpr_D(dc
, rd
);
1906 gen(dst
, cpu_env
, src
);
1908 gen_store_fpr_D(dc
, rd
, dst
);
1911 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1912 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1917 src
= gen_load_fpr_D(dc
, rs
);
1918 dst
= gen_dest_fpr_F(dc
);
1920 gen(dst
, cpu_env
, src
);
1922 gen_store_fpr_F(dc
, rd
, dst
);
1925 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1926 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1930 gen_op_load_fpr_QT1(QFPREG(rs
));
1931 dst
= gen_dest_fpr_F(dc
);
1935 gen_store_fpr_F(dc
, rd
, dst
);
1938 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1939 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1943 gen_op_load_fpr_QT1(QFPREG(rs
));
1944 dst
= gen_dest_fpr_D(dc
, rd
);
1948 gen_store_fpr_D(dc
, rd
, dst
);
1951 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1952 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1956 src
= gen_load_fpr_F(dc
, rs
);
1960 gen_op_store_QT0_fpr(QFPREG(rd
));
1961 gen_update_fprs_dirty(QFPREG(rd
));
1964 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1965 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1969 src
= gen_load_fpr_D(dc
, rs
);
1973 gen_op_store_QT0_fpr(QFPREG(rd
));
1974 gen_update_fprs_dirty(QFPREG(rd
));
1978 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1979 static TCGv_i32
gen_get_asi(DisasContext
*dc
, int insn
)
1984 #ifdef TARGET_SPARC64
1987 gen_exception(dc
, TT_ILL_INSN
);
1991 asi
= GET_FIELD(insn
, 19, 26);
1993 return tcg_const_i32(asi
);
1996 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
1997 int insn
, int size
, int sign
)
1999 TCGv_i32 r_asi
, r_size
, r_sign
;
2001 r_asi
= gen_get_asi(dc
, insn
);
2002 r_size
= tcg_const_i32(size
);
2003 r_sign
= tcg_const_i32(sign
);
2004 #ifdef TARGET_SPARC64
2005 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2008 TCGv_i64 t64
= tcg_temp_new_i64();
2009 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2010 tcg_gen_trunc_i64_tl(dst
, t64
);
2011 tcg_temp_free_i64(t64
);
2014 tcg_temp_free_i32(r_sign
);
2015 tcg_temp_free_i32(r_size
);
2016 tcg_temp_free_i32(r_asi
);
2019 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2022 TCGv_i32 r_asi
, r_size
;
2024 r_asi
= gen_get_asi(dc
, insn
);
2025 r_size
= tcg_const_i32(size
);
2026 #ifdef TARGET_SPARC64
2027 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2030 TCGv_i64 t64
= tcg_temp_new_i64();
2031 tcg_gen_extu_tl_i64(t64
, src
);
2032 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2033 tcg_temp_free_i64(t64
);
2036 tcg_temp_free_i32(r_size
);
2037 tcg_temp_free_i32(r_asi
);
2040 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2041 TCGv addr
, int insn
)
2043 TCGv_i32 r_asi
, r_size
, r_sign
;
2044 TCGv_i64 s64
, t64
= tcg_temp_new_i64();
2046 r_asi
= gen_get_asi(dc
, insn
);
2047 r_size
= tcg_const_i32(4);
2048 r_sign
= tcg_const_i32(0);
2049 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2050 tcg_temp_free_i32(r_sign
);
2052 s64
= tcg_temp_new_i64();
2053 tcg_gen_extu_tl_i64(s64
, src
);
2054 gen_helper_st_asi(cpu_env
, addr
, s64
, r_asi
, r_size
);
2055 tcg_temp_free_i64(s64
);
2056 tcg_temp_free_i32(r_size
);
2057 tcg_temp_free_i32(r_asi
);
2059 tcg_gen_trunc_i64_tl(dst
, t64
);
2060 tcg_temp_free_i64(t64
);
2063 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv val2
,
2066 TCGv val1
= gen_load_gpr(dc
, rd
);
2067 TCGv dst
= gen_dest_gpr(dc
, rd
);
2068 TCGv_i32 r_asi
= gen_get_asi(dc
, insn
);
2070 gen_helper_cas_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2071 tcg_temp_free_i32(r_asi
);
2072 gen_store_gpr(dc
, rd
, dst
);
2075 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2077 TCGv_i32 r_asi
, r_size
, r_sign
;
2078 TCGv_i64 s64
, d64
= tcg_temp_new_i64();
2080 r_asi
= gen_get_asi(dc
, insn
);
2081 r_size
= tcg_const_i32(1);
2082 r_sign
= tcg_const_i32(0);
2083 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2084 tcg_temp_free_i32(r_sign
);
2086 s64
= tcg_const_i64(0xff);
2087 gen_helper_st_asi(cpu_env
, addr
, s64
, r_asi
, r_size
);
2088 tcg_temp_free_i64(s64
);
2089 tcg_temp_free_i32(r_size
);
2090 tcg_temp_free_i32(r_asi
);
2092 tcg_gen_trunc_i64_tl(dst
, d64
);
2093 tcg_temp_free_i64(d64
);
2097 #ifdef TARGET_SPARC64
2098 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2099 int insn
, int size
, int rd
)
2101 TCGv_i32 r_asi
, r_size
, r_rd
;
2103 r_asi
= gen_get_asi(dc
, insn
);
2104 r_size
= tcg_const_i32(size
);
2105 r_rd
= tcg_const_i32(rd
);
2106 gen_helper_ldf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2107 tcg_temp_free_i32(r_rd
);
2108 tcg_temp_free_i32(r_size
);
2109 tcg_temp_free_i32(r_asi
);
2112 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2113 int insn
, int size
, int rd
)
2115 TCGv_i32 r_asi
, r_size
, r_rd
;
2117 r_asi
= gen_get_asi(dc
, insn
);
2118 r_size
= tcg_const_i32(size
);
2119 r_rd
= tcg_const_i32(rd
);
2120 gen_helper_stf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2121 tcg_temp_free_i32(r_rd
);
2122 tcg_temp_free_i32(r_size
);
2123 tcg_temp_free_i32(r_asi
);
2126 static void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2129 TCGv_i32 r_asi
, r_rd
;
2131 r_asi
= gen_get_asi(dc
, insn
);
2132 r_rd
= tcg_const_i32(rd
);
2133 gen_helper_ldda_asi(cpu_env
, addr
, r_asi
, r_rd
);
2134 tcg_temp_free_i32(r_rd
);
2135 tcg_temp_free_i32(r_asi
);
2138 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2141 TCGv_i32 r_asi
, r_size
;
2142 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2143 TCGv_i64 t64
= tcg_temp_new_i64();
2145 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2146 r_asi
= gen_get_asi(dc
, insn
);
2147 r_size
= tcg_const_i32(8);
2148 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2149 tcg_temp_free_i32(r_size
);
2150 tcg_temp_free_i32(r_asi
);
2151 tcg_temp_free_i64(t64
);
2154 static void gen_casx_asi(DisasContext
*dc
, TCGv addr
, TCGv val2
,
2157 TCGv val1
= gen_load_gpr(dc
, rd
);
2158 TCGv dst
= gen_dest_gpr(dc
, rd
);
2159 TCGv_i32 r_asi
= gen_get_asi(dc
, insn
);
2161 gen_helper_casx_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2162 tcg_temp_free_i32(r_asi
);
2163 gen_store_gpr(dc
, rd
, dst
);
2166 #elif !defined(CONFIG_USER_ONLY)
2167 static void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2170 TCGv_i32 r_asi
, r_size
, r_sign
;
2174 r_asi
= gen_get_asi(dc
, insn
);
2175 r_size
= tcg_const_i32(8);
2176 r_sign
= tcg_const_i32(0);
2177 t64
= tcg_temp_new_i64();
2178 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2179 tcg_temp_free_i32(r_sign
);
2180 tcg_temp_free_i32(r_size
);
2181 tcg_temp_free_i32(r_asi
);
2183 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2184 whereby "rd + 1" elicits "error: array subscript is above array".
2185 Since we have already asserted that rd is even, the semantics
2187 t
= gen_dest_gpr(dc
, rd
| 1);
2188 tcg_gen_trunc_i64_tl(t
, t64
);
2189 gen_store_gpr(dc
, rd
| 1, t
);
2191 tcg_gen_shri_i64(t64
, t64
, 32);
2192 tcg_gen_trunc_i64_tl(hi
, t64
);
2193 tcg_temp_free_i64(t64
);
2194 gen_store_gpr(dc
, rd
, hi
);
2197 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2200 TCGv_i32 r_asi
, r_size
;
2201 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2202 TCGv_i64 t64
= tcg_temp_new_i64();
2204 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2205 r_asi
= gen_get_asi(dc
, insn
);
2206 r_size
= tcg_const_i32(8);
2207 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2208 tcg_temp_free_i32(r_size
);
2209 tcg_temp_free_i32(r_asi
);
2210 tcg_temp_free_i64(t64
);
2214 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2216 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2217 return gen_load_gpr(dc
, rs1
);
2220 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2222 if (IS_IMM
) { /* immediate */
2223 target_long simm
= GET_FIELDs(insn
, 19, 31);
2224 TCGv t
= get_temp_tl(dc
);
2225 tcg_gen_movi_tl(t
, simm
);
2227 } else { /* register */
2228 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2229 return gen_load_gpr(dc
, rs2
);
2233 #ifdef TARGET_SPARC64
2234 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2236 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2238 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2239 or fold the comparison down to 32 bits and use movcond_i32. Choose
2241 c32
= tcg_temp_new_i32();
2243 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2245 TCGv_i64 c64
= tcg_temp_new_i64();
2246 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2247 tcg_gen_extrl_i64_i32(c32
, c64
);
2248 tcg_temp_free_i64(c64
);
2251 s1
= gen_load_fpr_F(dc
, rs
);
2252 s2
= gen_load_fpr_F(dc
, rd
);
2253 dst
= gen_dest_fpr_F(dc
);
2254 zero
= tcg_const_i32(0);
2256 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2258 tcg_temp_free_i32(c32
);
2259 tcg_temp_free_i32(zero
);
2260 gen_store_fpr_F(dc
, rd
, dst
);
2263 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2265 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2266 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2267 gen_load_fpr_D(dc
, rs
),
2268 gen_load_fpr_D(dc
, rd
));
2269 gen_store_fpr_D(dc
, rd
, dst
);
2272 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2274 int qd
= QFPREG(rd
);
2275 int qs
= QFPREG(rs
);
2277 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2278 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2279 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2280 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2282 gen_update_fprs_dirty(qd
);
2285 #ifndef CONFIG_USER_ONLY
2286 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_env cpu_env
)
2288 TCGv_i32 r_tl
= tcg_temp_new_i32();
2290 /* load env->tl into r_tl */
2291 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2293 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2294 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2296 /* calculate offset to current trap state from env->ts, reuse r_tl */
2297 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2298 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2300 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2302 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2303 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2304 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2305 tcg_temp_free_ptr(r_tl_tmp
);
2308 tcg_temp_free_i32(r_tl
);
2312 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2313 int width
, bool cc
, bool left
)
2315 TCGv lo1
, lo2
, t1
, t2
;
2316 uint64_t amask
, tabl
, tabr
;
2317 int shift
, imask
, omask
;
2320 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2321 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2322 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2323 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2324 dc
->cc_op
= CC_OP_SUB
;
2327 /* Theory of operation: there are two tables, left and right (not to
2328 be confused with the left and right versions of the opcode). These
2329 are indexed by the low 3 bits of the inputs. To make things "easy",
2330 these tables are loaded into two constants, TABL and TABR below.
2331 The operation index = (input & imask) << shift calculates the index
2332 into the constant, while val = (table >> index) & omask calculates
2333 the value we're looking for. */
2340 tabl
= 0x80c0e0f0f8fcfeffULL
;
2341 tabr
= 0xff7f3f1f0f070301ULL
;
2343 tabl
= 0x0103070f1f3f7fffULL
;
2344 tabr
= 0xfffefcf8f0e0c080ULL
;
2364 tabl
= (2 << 2) | 3;
2365 tabr
= (3 << 2) | 1;
2367 tabl
= (1 << 2) | 3;
2368 tabr
= (3 << 2) | 2;
2375 lo1
= tcg_temp_new();
2376 lo2
= tcg_temp_new();
2377 tcg_gen_andi_tl(lo1
, s1
, imask
);
2378 tcg_gen_andi_tl(lo2
, s2
, imask
);
2379 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2380 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2382 t1
= tcg_const_tl(tabl
);
2383 t2
= tcg_const_tl(tabr
);
2384 tcg_gen_shr_tl(lo1
, t1
, lo1
);
2385 tcg_gen_shr_tl(lo2
, t2
, lo2
);
2386 tcg_gen_andi_tl(dst
, lo1
, omask
);
2387 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2391 amask
&= 0xffffffffULL
;
2393 tcg_gen_andi_tl(s1
, s1
, amask
);
2394 tcg_gen_andi_tl(s2
, s2
, amask
);
2396 /* We want to compute
2397 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2398 We've already done dst = lo1, so this reduces to
2399 dst &= (s1 == s2 ? -1 : lo2)
2404 tcg_gen_setcond_tl(TCG_COND_EQ
, t1
, s1
, s2
);
2405 tcg_gen_neg_tl(t1
, t1
);
2406 tcg_gen_or_tl(lo2
, lo2
, t1
);
2407 tcg_gen_and_tl(dst
, dst
, lo2
);
2415 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2417 TCGv tmp
= tcg_temp_new();
2419 tcg_gen_add_tl(tmp
, s1
, s2
);
2420 tcg_gen_andi_tl(dst
, tmp
, -8);
2422 tcg_gen_neg_tl(tmp
, tmp
);
2424 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2429 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2433 t1
= tcg_temp_new();
2434 t2
= tcg_temp_new();
2435 shift
= tcg_temp_new();
2437 tcg_gen_andi_tl(shift
, gsr
, 7);
2438 tcg_gen_shli_tl(shift
, shift
, 3);
2439 tcg_gen_shl_tl(t1
, s1
, shift
);
2441 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2442 shift of (up to 63) followed by a constant shift of 1. */
2443 tcg_gen_xori_tl(shift
, shift
, 63);
2444 tcg_gen_shr_tl(t2
, s2
, shift
);
2445 tcg_gen_shri_tl(t2
, t2
, 1);
2447 tcg_gen_or_tl(dst
, t1
, t2
);
2451 tcg_temp_free(shift
);
2455 #define CHECK_IU_FEATURE(dc, FEATURE) \
2456 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2458 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2459 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2462 /* before an instruction, dc->pc must be static */
2463 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2465 unsigned int opc
, rs1
, rs2
, rd
;
2466 TCGv cpu_src1
, cpu_src2
;
2467 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2468 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2471 opc
= GET_FIELD(insn
, 0, 1);
2472 rd
= GET_FIELD(insn
, 2, 6);
2475 case 0: /* branches/sethi */
2477 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2480 #ifdef TARGET_SPARC64
2481 case 0x1: /* V9 BPcc */
2485 target
= GET_FIELD_SP(insn
, 0, 18);
2486 target
= sign_extend(target
, 19);
2488 cc
= GET_FIELD_SP(insn
, 20, 21);
2490 do_branch(dc
, target
, insn
, 0);
2492 do_branch(dc
, target
, insn
, 1);
2497 case 0x3: /* V9 BPr */
2499 target
= GET_FIELD_SP(insn
, 0, 13) |
2500 (GET_FIELD_SP(insn
, 20, 21) << 14);
2501 target
= sign_extend(target
, 16);
2503 cpu_src1
= get_src1(dc
, insn
);
2504 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2507 case 0x5: /* V9 FBPcc */
2509 int cc
= GET_FIELD_SP(insn
, 20, 21);
2510 if (gen_trap_ifnofpu(dc
)) {
2513 target
= GET_FIELD_SP(insn
, 0, 18);
2514 target
= sign_extend(target
, 19);
2516 do_fbranch(dc
, target
, insn
, cc
);
2520 case 0x7: /* CBN+x */
2525 case 0x2: /* BN+x */
2527 target
= GET_FIELD(insn
, 10, 31);
2528 target
= sign_extend(target
, 22);
2530 do_branch(dc
, target
, insn
, 0);
2533 case 0x6: /* FBN+x */
2535 if (gen_trap_ifnofpu(dc
)) {
2538 target
= GET_FIELD(insn
, 10, 31);
2539 target
= sign_extend(target
, 22);
2541 do_fbranch(dc
, target
, insn
, 0);
2544 case 0x4: /* SETHI */
2545 /* Special-case %g0 because that's the canonical nop. */
2547 uint32_t value
= GET_FIELD(insn
, 10, 31);
2548 TCGv t
= gen_dest_gpr(dc
, rd
);
2549 tcg_gen_movi_tl(t
, value
<< 10);
2550 gen_store_gpr(dc
, rd
, t
);
2553 case 0x0: /* UNIMPL */
2562 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2563 TCGv o7
= gen_dest_gpr(dc
, 15);
2565 tcg_gen_movi_tl(o7
, dc
->pc
);
2566 gen_store_gpr(dc
, 15, o7
);
2569 #ifdef TARGET_SPARC64
2570 if (unlikely(AM_CHECK(dc
))) {
2571 target
&= 0xffffffffULL
;
2577 case 2: /* FPU & Logical Operations */
2579 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2580 TCGv cpu_dst
= get_temp_tl(dc
);
2583 if (xop
== 0x3a) { /* generate trap */
2584 int cond
= GET_FIELD(insn
, 3, 6);
2586 TCGLabel
*l1
= NULL
;
2597 /* Conditional trap. */
2599 #ifdef TARGET_SPARC64
2601 int cc
= GET_FIELD_SP(insn
, 11, 12);
2603 gen_compare(&cmp
, 0, cond
, dc
);
2604 } else if (cc
== 2) {
2605 gen_compare(&cmp
, 1, cond
, dc
);
2610 gen_compare(&cmp
, 0, cond
, dc
);
2612 l1
= gen_new_label();
2613 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2614 cmp
.c1
, cmp
.c2
, l1
);
2618 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2619 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2621 /* Don't use the normal temporaries, as they may well have
2622 gone out of scope with the branch above. While we're
2623 doing that we might as well pre-truncate to 32-bit. */
2624 trap
= tcg_temp_new_i32();
2626 rs1
= GET_FIELD_SP(insn
, 14, 18);
2628 rs2
= GET_FIELD_SP(insn
, 0, 6);
2630 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2631 /* Signal that the trap value is fully constant. */
2634 TCGv t1
= gen_load_gpr(dc
, rs1
);
2635 tcg_gen_trunc_tl_i32(trap
, t1
);
2636 tcg_gen_addi_i32(trap
, trap
, rs2
);
2640 rs2
= GET_FIELD_SP(insn
, 0, 4);
2641 t1
= gen_load_gpr(dc
, rs1
);
2642 t2
= gen_load_gpr(dc
, rs2
);
2643 tcg_gen_add_tl(t1
, t1
, t2
);
2644 tcg_gen_trunc_tl_i32(trap
, t1
);
2647 tcg_gen_andi_i32(trap
, trap
, mask
);
2648 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2651 gen_helper_raise_exception(cpu_env
, trap
);
2652 tcg_temp_free_i32(trap
);
2655 /* An unconditional trap ends the TB. */
2659 /* A conditional trap falls through to the next insn. */
2663 } else if (xop
== 0x28) {
2664 rs1
= GET_FIELD(insn
, 13, 17);
2667 #ifndef TARGET_SPARC64
2668 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2669 manual, rdy on the microSPARC
2671 case 0x0f: /* stbar in the SPARCv8 manual,
2672 rdy on the microSPARC II */
2673 case 0x10 ... 0x1f: /* implementation-dependent in the
2674 SPARCv8 manual, rdy on the
2677 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
2678 TCGv t
= gen_dest_gpr(dc
, rd
);
2679 /* Read Asr17 for a Leon3 monoprocessor */
2680 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
2681 gen_store_gpr(dc
, rd
, t
);
2685 gen_store_gpr(dc
, rd
, cpu_y
);
#ifdef TARGET_SPARC64
                    case 0x2: /* V9 rdccr */
                        gen_helper_rdccr(cpu_dst, cpu_env);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x3: /* V9 rdasi */
                        tcg_gen_movi_tl(cpu_dst, dc->asi);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x4: /* V9 rdtick */
                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x5: /* V9 rdpc */
                        {
                            TCGv t = gen_dest_gpr(dc, rd);
                            if (unlikely(AM_CHECK(dc))) {
                                tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                            } else {
                                tcg_gen_movi_tl(t, dc->pc);
                            }
                            gen_store_gpr(dc, rd, t);
                        }
                        break;
                    case 0x6: /* V9 rdfprs */
                        tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0xf: /* V9 membar */
                        break; /* no effect */
                    case 0x13: /* Graphics Status */
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        gen_store_gpr(dc, rd, cpu_gsr);
                        break;
                    case 0x16: /* Softint */
                        tcg_gen_ld32s_tl(cpu_dst, cpu_env,
                                         offsetof(CPUSPARCState, softint));
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x17: /* Tick compare */
                        gen_store_gpr(dc, rd, cpu_tick_cmpr);
                        break;
                    case 0x18: /* System tick */
                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x19: /* System tick compare */
                        gen_store_gpr(dc, rd, cpu_stick_cmpr);
                        break;
                    case 0x10: /* Performance Control */
                    case 0x11: /* Performance Instrumentation Counter */
                    case 0x12: /* Dispatch Control */
                    case 0x14: /* Softint set, WO */
                    case 0x15: /* Softint clear, WO */
#if !defined(CONFIG_USER_ONLY)
                } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
                    gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    rs1 = GET_FIELD(insn, 13, 17);
                    // gen_op_rdhpstate();
                    // gen_op_rdhtstate();
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    case 31: // hstick_cmpr
                        tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
#endif
                    gen_store_gpr(dc, rd, cpu_dst);
                } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
                    cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                    rs1 = GET_FIELD(insn, 13, 17);
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                  offsetof(trap_state, tpc));
                    tcg_temp_free_ptr(r_tsptr);
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                  offsetof(trap_state, tnpc));
                    tcg_temp_free_ptr(r_tsptr);
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                  offsetof(trap_state, tstate));
                    tcg_temp_free_ptr(r_tsptr);
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    r_tickptr = tcg_temp_new_ptr();
                    r_const = tcg_const_i32(dc->mem_idx);
                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                   offsetof(CPUSPARCState, tick));
                    gen_helper_tick_get_count(cpu_tmp0, cpu_env,
                                              r_tickptr, r_const);
                    tcg_temp_free_ptr(r_tickptr);
                    tcg_temp_free_i32(r_const);
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    case 11: // canrestore
                        tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                         offsetof(CPUSPARCState, canrestore));
                    case 12: // cleanwin
                        tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                         offsetof(CPUSPARCState, cleanwin));
                    case 13: // otherwin
                        tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                         offsetof(CPUSPARCState, otherwin));
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    case 16: // UA2005 gl
                        CHECK_IU_FEATURE(dc, GL);
                        tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                         offsetof(CPUSPARCState, gl));
                    case 26: // UA2005 strand status
                        CHECK_IU_FEATURE(dc, HYPV);
                        if (!hypervisor(dc))
                            goto priv_insn;
                        tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
#else
                    tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                    gen_store_gpr(dc, rd, cpu_tmp0);
                } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                    gen_helper_flushw(cpu_env);
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    gen_store_gpr(dc, rd, cpu_tbr);
#endif
                } else if (xop == 0x34) {   /* FPU Operations */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_op_clear_ieee_excp_and_FTT();
                    rs1 = GET_FIELD(insn, 13, 17);
                    rs2 = GET_FIELD(insn, 27, 31);
                    xop = GET_FIELD(insn, 18, 26);
                    switch (xop) {
                    case 0x1: /* fmovs */
                        cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                        gen_store_fpr_F(dc, rd, cpu_src1_32);
                    case 0x5: /* fnegs */
                        gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    case 0x9: /* fabss */
                        gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    case 0x29: /* fsqrts */
                        CHECK_FPU_FEATURE(dc, FSQRT);
                        gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    case 0x2a: /* fsqrtd */
                        CHECK_FPU_FEATURE(dc, FSQRT);
                        gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    case 0x2b: /* fsqrtq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    case 0x41: /* fadds */
                        gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    case 0x42: /* faddd */
                        gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    case 0x43: /* faddq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    case 0x45: /* fsubs */
                        gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    case 0x46: /* fsubd */
                        gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    case 0x47: /* fsubq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    case 0x49: /* fmuls */
                        CHECK_FPU_FEATURE(dc, FMUL);
                        gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    case 0x4a: /* fmuld */
                        CHECK_FPU_FEATURE(dc, FMUL);
                        gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    case 0x4b: /* fmulq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        CHECK_FPU_FEATURE(dc, FMUL);
                        gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    case 0x4d: /* fdivs */
                        gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    case 0x4e: /* fdivd */
                        gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    case 0x4f: /* fdivq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    case 0x69: /* fsmuld */
                        CHECK_FPU_FEATURE(dc, FSMULD);
                        gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    case 0x6e: /* fdmulq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    case 0xc4: /* fitos */
                        gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                    case 0xc6: /* fdtos */
                        gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    case 0xc7: /* fqtos */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    case 0xc8: /* fitod */
                        gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    case 0xc9: /* fstod */
                        gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    case 0xcb: /* fqtod */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    case 0xcc: /* fitoq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    case 0xcd: /* fstoq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    case 0xce: /* fdtoq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    case 0xd1: /* fstoi */
                        gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                    case 0xd2: /* fdtoi */
                        gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    case 0xd3: /* fqtoi */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
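                    /*
                     * Note on the helpers above: the gen_fop_* wrappers
                     * factor out the load-source / call-helper / store-result
                     * pattern for single (F), double (D) and quad (Q)
                     * operands, while the gen_ne_fop_* variants are used for
                     * operations that cannot raise IEEE exceptions (moves,
                     * negation, absolute value and exact conversions).
                     */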
#ifdef TARGET_SPARC64
                    case 0x2: /* V9 fmovd */
                        cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                        gen_store_fpr_D(dc, rd, cpu_src1_64);
                    case 0x3: /* V9 fmovq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_move_Q(rd, rs2);
                    case 0x6: /* V9 fnegd */
                        gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                    case 0x7: /* V9 fnegq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    case 0xa: /* V9 fabsd */
                        gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                    case 0xb: /* V9 fabsq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    case 0x81: /* V9 fstox */
                        gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    case 0x82: /* V9 fdtox */
                        gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    case 0x83: /* V9 fqtox */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    case 0x84: /* V9 fxtos */
                        gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    case 0x88: /* V9 fxtod */
                        gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    case 0x8c: /* V9 fxtoq */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                    int cond;
#endif
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_op_clear_ieee_excp_and_FTT();
                    rs1 = GET_FIELD(insn, 13, 17);
                    rs2 = GET_FIELD(insn, 27, 31);
                    xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                          \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 10, 12);                 \
        cpu_src1 = get_src1(dc, insn);                     \
        gen_compare_reg(&cmp, cond, cpu_src1);             \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)

                    if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                        FMOVR(s);
                    } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                        FMOVR(d);
                    } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVR(q);
                    }
#undef FMOVR
#endif
                    switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                    \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        gen_fcompare(&cmp, fcc, cond);                     \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)

                    case 0x001: /* V9 fmovscc %fcc0 */
                    case 0x002: /* V9 fmovdcc %fcc0 */
                    case 0x003: /* V9 fmovqcc %fcc0 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                    case 0x041: /* V9 fmovscc %fcc1 */
                    case 0x042: /* V9 fmovdcc %fcc1 */
                    case 0x043: /* V9 fmovqcc %fcc1 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                    case 0x081: /* V9 fmovscc %fcc2 */
                    case 0x082: /* V9 fmovdcc %fcc2 */
                    case 0x083: /* V9 fmovqcc %fcc2 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                    case 0x0c1: /* V9 fmovscc %fcc3 */
                    case 0x0c2: /* V9 fmovdcc %fcc3 */
                    case 0x0c3: /* V9 fmovqcc %fcc3 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
#undef FMOVCC
#define FMOVCC(xcc, sz)                                    \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        gen_compare(&cmp, xcc, cond, dc);                  \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)

                    case 0x101: /* V9 fmovscc %icc */
                    case 0x102: /* V9 fmovdcc %icc */
                    case 0x103: /* V9 fmovqcc %icc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                    case 0x181: /* V9 fmovscc %xcc */
                    case 0x182: /* V9 fmovdcc %xcc */
                    case 0x183: /* V9 fmovqcc %xcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                    case 0x51: /* fcmps, V9 %fcc */
                        cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                        cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                        gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                    case 0x52: /* fcmpd, V9 %fcc */
                        cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                        cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                        gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                    case 0x53: /* fcmpq, V9 %fcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rs1));
                        gen_op_load_fpr_QT1(QFPREG(rs2));
                        gen_op_fcmpq(rd & 3);
                    case 0x55: /* fcmpes, V9 %fcc */
                        cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                        cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                        gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                    case 0x56: /* fcmped, V9 %fcc */
                        cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                        cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                        gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                    case 0x57: /* fcmpeq, V9 %fcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rs1));
                        gen_op_load_fpr_QT1(QFPREG(rs2));
                        gen_op_fcmpeq(rd & 3);
                } else if (xop == 0x2) {
                    TCGv dst = gen_dest_gpr(dc, rd);
                    rs1 = GET_FIELD(insn, 13, 17);
                    if (rs1 == 0) {
                        /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                        if (IS_IMM) {   /* immediate */
                            simm = GET_FIELDs(insn, 19, 31);
                            tcg_gen_movi_tl(dst, simm);
                            gen_store_gpr(dc, rd, dst);
                        } else {        /* register */
                            rs2 = GET_FIELD(insn, 27, 31);
                            if (rs2 == 0) {
                                tcg_gen_movi_tl(dst, 0);
                                gen_store_gpr(dc, rd, dst);
                            } else {
                                cpu_src2 = gen_load_gpr(dc, rs2);
                                gen_store_gpr(dc, rd, cpu_src2);
                            }
                        }
                    } else {
                        cpu_src1 = get_src1(dc, insn);
                        if (IS_IMM) {   /* immediate */
                            simm = GET_FIELDs(insn, 19, 31);
                            tcg_gen_ori_tl(dst, cpu_src1, simm);
                            gen_store_gpr(dc, rd, dst);
                        } else {        /* register */
                            rs2 = GET_FIELD(insn, 27, 31);
                            if (rs2 == 0) {
                                /* mov shortcut: or x, %g0, y -> mov x, y */
                                gen_store_gpr(dc, rd, cpu_src1);
                            } else {
                                cpu_src2 = gen_load_gpr(dc, rs2);
                                tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                                gen_store_gpr(dc, rd, dst);
                            }
                        }
                    }
#ifdef TARGET_SPARC64
                } else if (xop == 0x25) { /* sll, V9 sllx */
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {   /* immediate */
                        simm = GET_FIELDs(insn, 20, 31);
                        if (insn & (1 << 12)) {
                            tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                        } else {
                            tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
                        }
                    } else {        /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        cpu_tmp0 = get_temp_tl(dc);
                        if (insn & (1 << 12)) {
                            tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        } else {
                            tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        }
                        tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                } else if (xop == 0x26) { /* srl, V9 srlx */
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {   /* immediate */
                        simm = GET_FIELDs(insn, 20, 31);
                        if (insn & (1 << 12)) {
                            tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                        } else {
                            tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                            tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
                        }
                    } else {        /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        cpu_tmp0 = get_temp_tl(dc);
                        if (insn & (1 << 12)) {
                            tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                            tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                        } else {
                            tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                            tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
                        }
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                } else if (xop == 0x27) { /* sra, V9 srax */
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {   /* immediate */
                        simm = GET_FIELDs(insn, 20, 31);
                        if (insn & (1 << 12)) {
                            tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                        } else {
                            tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                            tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
                        }
                    } else {        /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        cpu_tmp0 = get_temp_tl(dc);
                        if (insn & (1 << 12)) {
                            tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                            tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                        } else {
                            tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                            tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
                        }
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
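                    /*
                     * In the three shift cases above, bit 12 of the
                     * instruction selects the 64-bit V9 form (sllx/srlx/srax)
                     * with a 6-bit shift count (& 0x3f); otherwise the 32-bit
                     * form is emulated by masking the count to 5 bits and,
                     * for srl/sra, first zero- or sign-extending the source
                     * from 32 bits.
                     */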
                } else if (xop < 0x36) {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop & ~0x10) {
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
                        tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                        tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                        tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                        dc->cc_op = CC_OP_SUB;
                        tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                    case 0x5: /* andn */
                        tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                        tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    case 0x7: /* xorn */
                        tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    case 0x8: /* addx, V9 addc */
                        gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
#ifdef TARGET_SPARC64
                    case 0x9: /* V9 mulx */
                        tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
#endif
                    case 0xa: /* umul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    case 0xb: /* smul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    case 0xc: /* subx, V9 subc */
                        gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
#ifdef TARGET_SPARC64
                    case 0xd: /* V9 udivx */
                        gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
#endif
                    case 0xe: /* udiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                           cpu_src2);
                        dc->cc_op = CC_OP_DIV;
                        gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                        cpu_src2);
                    case 0xf: /* sdiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                           cpu_src2);
                        dc->cc_op = CC_OP_DIV;
                        gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                        cpu_src2);
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop) {
                    case 0x20: /* taddcc */
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                        dc->cc_op = CC_OP_TADD;
                    case 0x21: /* tsubcc */
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                        dc->cc_op = CC_OP_TSUB;
                    case 0x22: /* taddcctv */
                        gen_helper_taddcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TADDTV;
                    case 0x23: /* tsubcctv */
                        gen_helper_tsubcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TSUBTV;
                    case 0x24: /* mulscc */
                        gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
#ifndef TARGET_SPARC64
                    case 0x25: /* sll */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                    case 0x26: /* srl */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                    case 0x27: /* sra */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
3588 cpu_tmp0
= get_temp_tl(dc
);
3591 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3592 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
3594 #ifndef TARGET_SPARC64
3595 case 0x01 ... 0x0f: /* undefined in the
3599 case 0x10 ... 0x1f: /* implementation-dependent
3603 if ((rd
== 0x13) && (dc
->def
->features
&
3604 CPU_FEATURE_POWERDOWN
)) {
3605 /* LEON3 power-down */
3607 gen_helper_power_down(cpu_env
);
3611 case 0x2: /* V9 wrccr */
3612 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3613 gen_helper_wrccr(cpu_env
, cpu_tmp0
);
3614 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3615 dc
->cc_op
= CC_OP_FLAGS
;
3617 case 0x3: /* V9 wrasi */
3618 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3619 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
3620 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3621 offsetof(CPUSPARCState
, asi
));
3622 /* End TB to notice changed ASI. */
3628 case 0x6: /* V9 wrfprs */
3629 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3630 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
3636 case 0xf: /* V9 sir, nop if user */
3637 #if !defined(CONFIG_USER_ONLY)
3638 if (supervisor(dc
)) {
3643 case 0x13: /* Graphics Status */
3644 if (gen_trap_ifnofpu(dc
)) {
3647 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
3649 case 0x14: /* Softint set */
3650 if (!supervisor(dc
))
3652 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3653 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
3655 case 0x15: /* Softint clear */
3656 if (!supervisor(dc
))
3658 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3659 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
3661 case 0x16: /* Softint write */
3662 if (!supervisor(dc
))
3664 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3665 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
3667 case 0x17: /* Tick compare */
3668 #if !defined(CONFIG_USER_ONLY)
3669 if (!supervisor(dc
))
3675 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
3677 r_tickptr
= tcg_temp_new_ptr();
3678 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3679 offsetof(CPUSPARCState
, tick
));
3680 gen_helper_tick_set_limit(r_tickptr
,
3682 tcg_temp_free_ptr(r_tickptr
);
3685 case 0x18: /* System tick */
3686 #if !defined(CONFIG_USER_ONLY)
3687 if (!supervisor(dc
))
3693 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
3695 r_tickptr
= tcg_temp_new_ptr();
3696 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3697 offsetof(CPUSPARCState
, stick
));
3698 gen_helper_tick_set_count(r_tickptr
,
3700 tcg_temp_free_ptr(r_tickptr
);
3703 case 0x19: /* System tick compare */
3704 #if !defined(CONFIG_USER_ONLY)
3705 if (!supervisor(dc
))
3711 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
3713 r_tickptr
= tcg_temp_new_ptr();
3714 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3715 offsetof(CPUSPARCState
, stick
));
3716 gen_helper_tick_set_limit(r_tickptr
,
3718 tcg_temp_free_ptr(r_tickptr
);
3722 case 0x10: /* Performance Control */
3723 case 0x11: /* Performance Instrumentation
3725 case 0x12: /* Dispatch Control */
3732 #if !defined(CONFIG_USER_ONLY)
3733 case 0x31: /* wrpsr, V9 saved, restored */
3735 if (!supervisor(dc
))
3737 #ifdef TARGET_SPARC64
3740 gen_helper_saved(cpu_env
);
3743 gen_helper_restored(cpu_env
);
3745 case 2: /* UA2005 allclean */
3746 case 3: /* UA2005 otherw */
3747 case 4: /* UA2005 normalw */
3748 case 5: /* UA2005 invalw */
3754 cpu_tmp0
= get_temp_tl(dc
);
3755 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3756 gen_helper_wrpsr(cpu_env
, cpu_tmp0
);
3757 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3758 dc
->cc_op
= CC_OP_FLAGS
;
3766 case 0x32: /* wrwim, V9 wrpr */
3768 if (!supervisor(dc
))
3770 cpu_tmp0
= get_temp_tl(dc
);
3771 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3772 #ifdef TARGET_SPARC64
3778 r_tsptr
= tcg_temp_new_ptr();
3779 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3780 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3781 offsetof(trap_state
, tpc
));
3782 tcg_temp_free_ptr(r_tsptr
);
3789 r_tsptr
= tcg_temp_new_ptr();
3790 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3791 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3792 offsetof(trap_state
, tnpc
));
3793 tcg_temp_free_ptr(r_tsptr
);
3800 r_tsptr
= tcg_temp_new_ptr();
3801 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3802 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3803 offsetof(trap_state
,
3805 tcg_temp_free_ptr(r_tsptr
);
3812 r_tsptr
= tcg_temp_new_ptr();
3813 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3814 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
3815 offsetof(trap_state
, tt
));
3816 tcg_temp_free_ptr(r_tsptr
);
3823 r_tickptr
= tcg_temp_new_ptr();
3824 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3825 offsetof(CPUSPARCState
, tick
));
3826 gen_helper_tick_set_count(r_tickptr
,
3828 tcg_temp_free_ptr(r_tickptr
);
3832 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
3836 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
3837 dc
->npc
= DYNAMIC_PC
;
3841 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3842 offsetof(CPUSPARCState
, tl
));
3843 dc
->npc
= DYNAMIC_PC
;
3846 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
3849 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
3852 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3853 offsetof(CPUSPARCState
,
3856 case 11: // canrestore
3857 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3858 offsetof(CPUSPARCState
,
3861 case 12: // cleanwin
3862 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3863 offsetof(CPUSPARCState
,
3866 case 13: // otherwin
3867 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3868 offsetof(CPUSPARCState
,
3872 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3873 offsetof(CPUSPARCState
,
3876 case 16: // UA2005 gl
3877 CHECK_IU_FEATURE(dc
, GL
);
3878 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3879 offsetof(CPUSPARCState
, gl
));
3881 case 26: // UA2005 strand status
3882 CHECK_IU_FEATURE(dc
, HYPV
);
3883 if (!hypervisor(dc
))
3885 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
3891 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
3892 if (dc
->def
->nwindows
!= 32) {
3893 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
3894 (1 << dc
->def
->nwindows
) - 1);
3899 case 0x33: /* wrtbr, UA2005 wrhpr */
3901 #ifndef TARGET_SPARC64
3902 if (!supervisor(dc
))
3904 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
3906 CHECK_IU_FEATURE(dc
, HYPV
);
3907 if (!hypervisor(dc
))
3909 cpu_tmp0
= get_temp_tl(dc
);
3910 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3913 // XXX gen_op_wrhpstate();
3920 // XXX gen_op_wrhtstate();
3923 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
3926 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
3928 case 31: // hstick_cmpr
3932 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
3933 r_tickptr
= tcg_temp_new_ptr();
3934 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3935 offsetof(CPUSPARCState
, hstick
));
3936 gen_helper_tick_set_limit(r_tickptr
,
3938 tcg_temp_free_ptr(r_tickptr
);
3941 case 6: // hver readonly
3949 #ifdef TARGET_SPARC64
3950 case 0x2c: /* V9 movcc */
3952 int cc
= GET_FIELD_SP(insn
, 11, 12);
3953 int cond
= GET_FIELD_SP(insn
, 14, 17);
3957 if (insn
& (1 << 18)) {
3959 gen_compare(&cmp
, 0, cond
, dc
);
3960 } else if (cc
== 2) {
3961 gen_compare(&cmp
, 1, cond
, dc
);
3966 gen_fcompare(&cmp
, cc
, cond
);
3969 /* The get_src2 above loaded the normal 13-bit
3970 immediate field, not the 11-bit field we have
3971 in movcc. But it did handle the reg case. */
3973 simm
= GET_FIELD_SPs(insn
, 0, 10);
3974 tcg_gen_movi_tl(cpu_src2
, simm
);
3977 dst
= gen_load_gpr(dc
, rd
);
3978 tcg_gen_movcond_tl(cmp
.cond
, dst
,
3982 gen_store_gpr(dc
, rd
, dst
);
3985 case 0x2d: /* V9 sdivx */
3986 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3987 gen_store_gpr(dc
, rd
, cpu_dst
);
3989 case 0x2e: /* V9 popc */
3990 gen_helper_popc(cpu_dst
, cpu_src2
);
3991 gen_store_gpr(dc
, rd
, cpu_dst
);
3993 case 0x2f: /* V9 movr */
3995 int cond
= GET_FIELD_SP(insn
, 10, 12);
3999 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4001 /* The get_src2 above loaded the normal 13-bit
4002 immediate field, not the 10-bit field we have
4003 in movr. But it did handle the reg case. */
4005 simm
= GET_FIELD_SPs(insn
, 0, 9);
4006 tcg_gen_movi_tl(cpu_src2
, simm
);
4009 dst
= gen_load_gpr(dc
, rd
);
4010 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4014 gen_store_gpr(dc
, rd
, dst
);
4022 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4023 #ifdef TARGET_SPARC64
4024 int opf
= GET_FIELD_SP(insn
, 5, 13);
4025 rs1
= GET_FIELD(insn
, 13, 17);
4026 rs2
= GET_FIELD(insn
, 27, 31);
4027 if (gen_trap_ifnofpu(dc
)) {
4032 case 0x000: /* VIS I edge8cc */
4033 CHECK_FPU_FEATURE(dc
, VIS1
);
4034 cpu_src1
= gen_load_gpr(dc
, rs1
);
4035 cpu_src2
= gen_load_gpr(dc
, rs2
);
4036 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4037 gen_store_gpr(dc
, rd
, cpu_dst
);
4039 case 0x001: /* VIS II edge8n */
4040 CHECK_FPU_FEATURE(dc
, VIS2
);
4041 cpu_src1
= gen_load_gpr(dc
, rs1
);
4042 cpu_src2
= gen_load_gpr(dc
, rs2
);
4043 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4044 gen_store_gpr(dc
, rd
, cpu_dst
);
4046 case 0x002: /* VIS I edge8lcc */
4047 CHECK_FPU_FEATURE(dc
, VIS1
);
4048 cpu_src1
= gen_load_gpr(dc
, rs1
);
4049 cpu_src2
= gen_load_gpr(dc
, rs2
);
4050 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4051 gen_store_gpr(dc
, rd
, cpu_dst
);
4053 case 0x003: /* VIS II edge8ln */
4054 CHECK_FPU_FEATURE(dc
, VIS2
);
4055 cpu_src1
= gen_load_gpr(dc
, rs1
);
4056 cpu_src2
= gen_load_gpr(dc
, rs2
);
4057 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4058 gen_store_gpr(dc
, rd
, cpu_dst
);
4060 case 0x004: /* VIS I edge16cc */
4061 CHECK_FPU_FEATURE(dc
, VIS1
);
4062 cpu_src1
= gen_load_gpr(dc
, rs1
);
4063 cpu_src2
= gen_load_gpr(dc
, rs2
);
4064 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4065 gen_store_gpr(dc
, rd
, cpu_dst
);
4067 case 0x005: /* VIS II edge16n */
4068 CHECK_FPU_FEATURE(dc
, VIS2
);
4069 cpu_src1
= gen_load_gpr(dc
, rs1
);
4070 cpu_src2
= gen_load_gpr(dc
, rs2
);
4071 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4072 gen_store_gpr(dc
, rd
, cpu_dst
);
4074 case 0x006: /* VIS I edge16lcc */
4075 CHECK_FPU_FEATURE(dc
, VIS1
);
4076 cpu_src1
= gen_load_gpr(dc
, rs1
);
4077 cpu_src2
= gen_load_gpr(dc
, rs2
);
4078 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4079 gen_store_gpr(dc
, rd
, cpu_dst
);
4081 case 0x007: /* VIS II edge16ln */
4082 CHECK_FPU_FEATURE(dc
, VIS2
);
4083 cpu_src1
= gen_load_gpr(dc
, rs1
);
4084 cpu_src2
= gen_load_gpr(dc
, rs2
);
4085 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4086 gen_store_gpr(dc
, rd
, cpu_dst
);
4088 case 0x008: /* VIS I edge32cc */
4089 CHECK_FPU_FEATURE(dc
, VIS1
);
4090 cpu_src1
= gen_load_gpr(dc
, rs1
);
4091 cpu_src2
= gen_load_gpr(dc
, rs2
);
4092 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4093 gen_store_gpr(dc
, rd
, cpu_dst
);
4095 case 0x009: /* VIS II edge32n */
4096 CHECK_FPU_FEATURE(dc
, VIS2
);
4097 cpu_src1
= gen_load_gpr(dc
, rs1
);
4098 cpu_src2
= gen_load_gpr(dc
, rs2
);
4099 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4100 gen_store_gpr(dc
, rd
, cpu_dst
);
4102 case 0x00a: /* VIS I edge32lcc */
4103 CHECK_FPU_FEATURE(dc
, VIS1
);
4104 cpu_src1
= gen_load_gpr(dc
, rs1
);
4105 cpu_src2
= gen_load_gpr(dc
, rs2
);
4106 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4107 gen_store_gpr(dc
, rd
, cpu_dst
);
4109 case 0x00b: /* VIS II edge32ln */
4110 CHECK_FPU_FEATURE(dc
, VIS2
);
4111 cpu_src1
= gen_load_gpr(dc
, rs1
);
4112 cpu_src2
= gen_load_gpr(dc
, rs2
);
4113 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4114 gen_store_gpr(dc
, rd
, cpu_dst
);
4116 case 0x010: /* VIS I array8 */
4117 CHECK_FPU_FEATURE(dc
, VIS1
);
4118 cpu_src1
= gen_load_gpr(dc
, rs1
);
4119 cpu_src2
= gen_load_gpr(dc
, rs2
);
4120 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4121 gen_store_gpr(dc
, rd
, cpu_dst
);
4123 case 0x012: /* VIS I array16 */
4124 CHECK_FPU_FEATURE(dc
, VIS1
);
4125 cpu_src1
= gen_load_gpr(dc
, rs1
);
4126 cpu_src2
= gen_load_gpr(dc
, rs2
);
4127 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4128 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4129 gen_store_gpr(dc
, rd
, cpu_dst
);
4131 case 0x014: /* VIS I array32 */
4132 CHECK_FPU_FEATURE(dc
, VIS1
);
4133 cpu_src1
= gen_load_gpr(dc
, rs1
);
4134 cpu_src2
= gen_load_gpr(dc
, rs2
);
4135 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4136 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4137 gen_store_gpr(dc
, rd
, cpu_dst
);
4139 case 0x018: /* VIS I alignaddr */
4140 CHECK_FPU_FEATURE(dc
, VIS1
);
4141 cpu_src1
= gen_load_gpr(dc
, rs1
);
4142 cpu_src2
= gen_load_gpr(dc
, rs2
);
4143 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4144 gen_store_gpr(dc
, rd
, cpu_dst
);
4146 case 0x01a: /* VIS I alignaddrl */
4147 CHECK_FPU_FEATURE(dc
, VIS1
);
4148 cpu_src1
= gen_load_gpr(dc
, rs1
);
4149 cpu_src2
= gen_load_gpr(dc
, rs2
);
4150 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4151 gen_store_gpr(dc
, rd
, cpu_dst
);
4153 case 0x019: /* VIS II bmask */
4154 CHECK_FPU_FEATURE(dc
, VIS2
);
4155 cpu_src1
= gen_load_gpr(dc
, rs1
);
4156 cpu_src2
= gen_load_gpr(dc
, rs2
);
4157 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4158 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4159 gen_store_gpr(dc
, rd
, cpu_dst
);
4161 case 0x020: /* VIS I fcmple16 */
4162 CHECK_FPU_FEATURE(dc
, VIS1
);
4163 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4164 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4165 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4166 gen_store_gpr(dc
, rd
, cpu_dst
);
4168 case 0x022: /* VIS I fcmpne16 */
4169 CHECK_FPU_FEATURE(dc
, VIS1
);
4170 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4171 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4172 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4173 gen_store_gpr(dc
, rd
, cpu_dst
);
4175 case 0x024: /* VIS I fcmple32 */
4176 CHECK_FPU_FEATURE(dc
, VIS1
);
4177 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4178 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4179 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4180 gen_store_gpr(dc
, rd
, cpu_dst
);
4182 case 0x026: /* VIS I fcmpne32 */
4183 CHECK_FPU_FEATURE(dc
, VIS1
);
4184 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4185 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4186 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4187 gen_store_gpr(dc
, rd
, cpu_dst
);
4189 case 0x028: /* VIS I fcmpgt16 */
4190 CHECK_FPU_FEATURE(dc
, VIS1
);
4191 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4192 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4193 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4194 gen_store_gpr(dc
, rd
, cpu_dst
);
4196 case 0x02a: /* VIS I fcmpeq16 */
4197 CHECK_FPU_FEATURE(dc
, VIS1
);
4198 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4199 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4200 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4201 gen_store_gpr(dc
, rd
, cpu_dst
);
4203 case 0x02c: /* VIS I fcmpgt32 */
4204 CHECK_FPU_FEATURE(dc
, VIS1
);
4205 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4206 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4207 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4208 gen_store_gpr(dc
, rd
, cpu_dst
);
4210 case 0x02e: /* VIS I fcmpeq32 */
4211 CHECK_FPU_FEATURE(dc
, VIS1
);
4212 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4213 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4214 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4215 gen_store_gpr(dc
, rd
, cpu_dst
);
4217 case 0x031: /* VIS I fmul8x16 */
4218 CHECK_FPU_FEATURE(dc
, VIS1
);
4219 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4221 case 0x033: /* VIS I fmul8x16au */
4222 CHECK_FPU_FEATURE(dc
, VIS1
);
4223 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4225 case 0x035: /* VIS I fmul8x16al */
4226 CHECK_FPU_FEATURE(dc
, VIS1
);
4227 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4229 case 0x036: /* VIS I fmul8sux16 */
4230 CHECK_FPU_FEATURE(dc
, VIS1
);
4231 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4233 case 0x037: /* VIS I fmul8ulx16 */
4234 CHECK_FPU_FEATURE(dc
, VIS1
);
4235 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4237 case 0x038: /* VIS I fmuld8sux16 */
4238 CHECK_FPU_FEATURE(dc
, VIS1
);
4239 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4241 case 0x039: /* VIS I fmuld8ulx16 */
4242 CHECK_FPU_FEATURE(dc
, VIS1
);
4243 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4245 case 0x03a: /* VIS I fpack32 */
4246 CHECK_FPU_FEATURE(dc
, VIS1
);
4247 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4249 case 0x03b: /* VIS I fpack16 */
4250 CHECK_FPU_FEATURE(dc
, VIS1
);
4251 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4252 cpu_dst_32
= gen_dest_fpr_F(dc
);
4253 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4254 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4256 case 0x03d: /* VIS I fpackfix */
4257 CHECK_FPU_FEATURE(dc
, VIS1
);
4258 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4259 cpu_dst_32
= gen_dest_fpr_F(dc
);
4260 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4261 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4263 case 0x03e: /* VIS I pdist */
4264 CHECK_FPU_FEATURE(dc
, VIS1
);
4265 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4267 case 0x048: /* VIS I faligndata */
4268 CHECK_FPU_FEATURE(dc
, VIS1
);
4269 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4271 case 0x04b: /* VIS I fpmerge */
4272 CHECK_FPU_FEATURE(dc
, VIS1
);
4273 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4275 case 0x04c: /* VIS II bshuffle */
4276 CHECK_FPU_FEATURE(dc
, VIS2
);
4277 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4279 case 0x04d: /* VIS I fexpand */
4280 CHECK_FPU_FEATURE(dc
, VIS1
);
4281 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4283 case 0x050: /* VIS I fpadd16 */
4284 CHECK_FPU_FEATURE(dc
, VIS1
);
4285 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4287 case 0x051: /* VIS I fpadd16s */
4288 CHECK_FPU_FEATURE(dc
, VIS1
);
4289 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4291 case 0x052: /* VIS I fpadd32 */
4292 CHECK_FPU_FEATURE(dc
, VIS1
);
4293 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4295 case 0x053: /* VIS I fpadd32s */
4296 CHECK_FPU_FEATURE(dc
, VIS1
);
4297 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4299 case 0x054: /* VIS I fpsub16 */
4300 CHECK_FPU_FEATURE(dc
, VIS1
);
4301 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4303 case 0x055: /* VIS I fpsub16s */
4304 CHECK_FPU_FEATURE(dc
, VIS1
);
4305 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4307 case 0x056: /* VIS I fpsub32 */
4308 CHECK_FPU_FEATURE(dc
, VIS1
);
4309 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4311 case 0x057: /* VIS I fpsub32s */
4312 CHECK_FPU_FEATURE(dc
, VIS1
);
4313 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4315 case 0x060: /* VIS I fzero */
4316 CHECK_FPU_FEATURE(dc
, VIS1
);
4317 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4318 tcg_gen_movi_i64(cpu_dst_64
, 0);
4319 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4321 case 0x061: /* VIS I fzeros */
4322 CHECK_FPU_FEATURE(dc
, VIS1
);
4323 cpu_dst_32
= gen_dest_fpr_F(dc
);
4324 tcg_gen_movi_i32(cpu_dst_32
, 0);
4325 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4327 case 0x062: /* VIS I fnor */
4328 CHECK_FPU_FEATURE(dc
, VIS1
);
4329 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4331 case 0x063: /* VIS I fnors */
4332 CHECK_FPU_FEATURE(dc
, VIS1
);
4333 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4335 case 0x064: /* VIS I fandnot2 */
4336 CHECK_FPU_FEATURE(dc
, VIS1
);
4337 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4339 case 0x065: /* VIS I fandnot2s */
4340 CHECK_FPU_FEATURE(dc
, VIS1
);
4341 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4343 case 0x066: /* VIS I fnot2 */
4344 CHECK_FPU_FEATURE(dc
, VIS1
);
4345 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4347 case 0x067: /* VIS I fnot2s */
4348 CHECK_FPU_FEATURE(dc
, VIS1
);
4349 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4351 case 0x068: /* VIS I fandnot1 */
4352 CHECK_FPU_FEATURE(dc
, VIS1
);
4353 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4355 case 0x069: /* VIS I fandnot1s */
4356 CHECK_FPU_FEATURE(dc
, VIS1
);
4357 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4359 case 0x06a: /* VIS I fnot1 */
4360 CHECK_FPU_FEATURE(dc
, VIS1
);
4361 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4363 case 0x06b: /* VIS I fnot1s */
4364 CHECK_FPU_FEATURE(dc
, VIS1
);
4365 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4367 case 0x06c: /* VIS I fxor */
4368 CHECK_FPU_FEATURE(dc
, VIS1
);
4369 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4371 case 0x06d: /* VIS I fxors */
4372 CHECK_FPU_FEATURE(dc
, VIS1
);
4373 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4375 case 0x06e: /* VIS I fnand */
4376 CHECK_FPU_FEATURE(dc
, VIS1
);
4377 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4379 case 0x06f: /* VIS I fnands */
4380 CHECK_FPU_FEATURE(dc
, VIS1
);
4381 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4383 case 0x070: /* VIS I fand */
4384 CHECK_FPU_FEATURE(dc
, VIS1
);
4385 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4387 case 0x071: /* VIS I fands */
4388 CHECK_FPU_FEATURE(dc
, VIS1
);
4389 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4391 case 0x072: /* VIS I fxnor */
4392 CHECK_FPU_FEATURE(dc
, VIS1
);
4393 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4395 case 0x073: /* VIS I fxnors */
4396 CHECK_FPU_FEATURE(dc
, VIS1
);
4397 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4399 case 0x074: /* VIS I fsrc1 */
4400 CHECK_FPU_FEATURE(dc
, VIS1
);
4401 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4402 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4404 case 0x075: /* VIS I fsrc1s */
4405 CHECK_FPU_FEATURE(dc
, VIS1
);
4406 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4407 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4409 case 0x076: /* VIS I fornot2 */
4410 CHECK_FPU_FEATURE(dc
, VIS1
);
4411 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4413 case 0x077: /* VIS I fornot2s */
4414 CHECK_FPU_FEATURE(dc
, VIS1
);
4415 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4417 case 0x078: /* VIS I fsrc2 */
4418 CHECK_FPU_FEATURE(dc
, VIS1
);
4419 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4420 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4422 case 0x079: /* VIS I fsrc2s */
4423 CHECK_FPU_FEATURE(dc
, VIS1
);
4424 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4425 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4427 case 0x07a: /* VIS I fornot1 */
4428 CHECK_FPU_FEATURE(dc
, VIS1
);
4429 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4431 case 0x07b: /* VIS I fornot1s */
4432 CHECK_FPU_FEATURE(dc
, VIS1
);
4433 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4435 case 0x07c: /* VIS I for */
4436 CHECK_FPU_FEATURE(dc
, VIS1
);
4437 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4439 case 0x07d: /* VIS I fors */
4440 CHECK_FPU_FEATURE(dc
, VIS1
);
4441 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4443 case 0x07e: /* VIS I fone */
4444 CHECK_FPU_FEATURE(dc
, VIS1
);
4445 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4446 tcg_gen_movi_i64(cpu_dst_64
, -1);
4447 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4449 case 0x07f: /* VIS I fones */
4450 CHECK_FPU_FEATURE(dc
, VIS1
);
4451 cpu_dst_32
= gen_dest_fpr_F(dc
);
4452 tcg_gen_movi_i32(cpu_dst_32
, -1);
4453 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4455 case 0x080: /* VIS I shutdown */
4456 case 0x081: /* VIS II siam */
4465 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
4466 #ifdef TARGET_SPARC64
4471 #ifdef TARGET_SPARC64
4472 } else if (xop
== 0x39) { /* V9 return */
4476 cpu_src1
= get_src1(dc
, insn
);
4477 cpu_tmp0
= get_temp_tl(dc
);
4478 if (IS_IMM
) { /* immediate */
4479 simm
= GET_FIELDs(insn
, 19, 31);
4480 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
4481 } else { /* register */
4482 rs2
= GET_FIELD(insn
, 27, 31);
4484 cpu_src2
= gen_load_gpr(dc
, rs2
);
4485 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4487 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
4490 gen_helper_restore(cpu_env
);
4492 r_const
= tcg_const_i32(3);
4493 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4494 tcg_temp_free_i32(r_const
);
4495 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4496 dc
->npc
= DYNAMIC_PC
;
4500 cpu_src1
= get_src1(dc
, insn
);
4501 cpu_tmp0
= get_temp_tl(dc
);
4502 if (IS_IMM
) { /* immediate */
4503 simm
= GET_FIELDs(insn
, 19, 31);
4504 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
4505 } else { /* register */
4506 rs2
= GET_FIELD(insn
, 27, 31);
4508 cpu_src2
= gen_load_gpr(dc
, rs2
);
4509 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4511 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
4515 case 0x38: /* jmpl */
4520 t
= gen_dest_gpr(dc
, rd
);
4521 tcg_gen_movi_tl(t
, dc
->pc
);
4522 gen_store_gpr(dc
, rd
, t
);
4524 r_const
= tcg_const_i32(3);
4525 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4526 tcg_temp_free_i32(r_const
);
4527 gen_address_mask(dc
, cpu_tmp0
);
4528 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4529 dc
->npc
= DYNAMIC_PC
;
4532 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4533 case 0x39: /* rett, V9 return */
4537 if (!supervisor(dc
))
4540 r_const
= tcg_const_i32(3);
4541 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4542 tcg_temp_free_i32(r_const
);
4543 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4544 dc
->npc
= DYNAMIC_PC
;
4545 gen_helper_rett(cpu_env
);
4549 case 0x3b: /* flush */
4550 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
4554 case 0x3c: /* save */
4556 gen_helper_save(cpu_env
);
4557 gen_store_gpr(dc
, rd
, cpu_tmp0
);
4559 case 0x3d: /* restore */
4561 gen_helper_restore(cpu_env
);
4562 gen_store_gpr(dc
, rd
, cpu_tmp0
);
4564 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4565 case 0x3e: /* V9 done/retry */
4569 if (!supervisor(dc
))
4571 dc
->npc
= DYNAMIC_PC
;
4572 dc
->pc
= DYNAMIC_PC
;
4573 gen_helper_done(cpu_env
);
4576 if (!supervisor(dc
))
4578 dc
->npc
= DYNAMIC_PC
;
4579 dc
->pc
= DYNAMIC_PC
;
4580 gen_helper_retry(cpu_env
);
4595 case 3: /* load/store instructions */
4597 unsigned int xop
= GET_FIELD(insn
, 7, 12);
4598 /* ??? gen_address_mask prevents us from using a source
4599 register directly. Always generate a temporary. */
4600 TCGv cpu_addr
= get_temp_tl(dc
);
4602 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
4603 if (xop
== 0x3c || xop
== 0x3e) {
4604 /* V9 casa/casxa : no offset */
4605 } else if (IS_IMM
) { /* immediate */
4606 simm
= GET_FIELDs(insn
, 19, 31);
4608 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
4610 } else { /* register */
4611 rs2
= GET_FIELD(insn
, 27, 31);
4613 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
4616 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
4617 (xop
> 0x17 && xop
<= 0x1d ) ||
4618 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
4619 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
4622 case 0x0: /* ld, V9 lduw, load unsigned word */
4623 gen_address_mask(dc
, cpu_addr
);
4624 tcg_gen_qemu_ld32u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4626 case 0x1: /* ldub, load unsigned byte */
4627 gen_address_mask(dc
, cpu_addr
);
4628 tcg_gen_qemu_ld8u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4630 case 0x2: /* lduh, load unsigned halfword */
4631 gen_address_mask(dc
, cpu_addr
);
4632 tcg_gen_qemu_ld16u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4634 case 0x3: /* ldd, load double word */
4642 r_const
= tcg_const_i32(7);
4643 /* XXX remove alignment check */
4644 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4645 tcg_temp_free_i32(r_const
);
4646 gen_address_mask(dc
, cpu_addr
);
4647 t64
= tcg_temp_new_i64();
4648 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4649 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4650 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
4651 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
4652 tcg_gen_shri_i64(t64
, t64
, 32);
4653 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4654 tcg_temp_free_i64(t64
);
4655 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
4658 case 0x9: /* ldsb, load signed byte */
4659 gen_address_mask(dc
, cpu_addr
);
4660 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4662 case 0xa: /* ldsh, load signed halfword */
4663 gen_address_mask(dc
, cpu_addr
);
4664 tcg_gen_qemu_ld16s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4666 case 0xd: /* ldstub -- XXX: should be atomically */
4669 TCGv tmp
= tcg_temp_new();
4671 gen_address_mask(dc
, cpu_addr
);
4672 tcg_gen_qemu_ld8u(tmp
, cpu_addr
, dc
->mem_idx
);
4673 r_const
= tcg_const_tl(0xff);
4674 tcg_gen_qemu_st8(r_const
, cpu_addr
, dc
->mem_idx
);
4675 tcg_gen_mov_tl(cpu_val
, tmp
);
4676 tcg_temp_free(r_const
);
4681 /* swap, swap register with memory. Also atomically */
4683 TCGv t0
= get_temp_tl(dc
);
4684 CHECK_IU_FEATURE(dc
, SWAP
);
4685 cpu_src1
= gen_load_gpr(dc
, rd
);
4686 gen_address_mask(dc
, cpu_addr
);
4687 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4688 tcg_gen_qemu_st32(cpu_src1
, cpu_addr
, dc
->mem_idx
);
4689 tcg_gen_mov_tl(cpu_val
, t0
);
4692 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4693 case 0x10: /* lda, V9 lduwa, load word alternate */
4694 #ifndef TARGET_SPARC64
4697 if (!supervisor(dc
))
4701 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 4, 0);
4703 case 0x11: /* lduba, load unsigned byte alternate */
4704 #ifndef TARGET_SPARC64
4707 if (!supervisor(dc
))
4711 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 1, 0);
4713 case 0x12: /* lduha, load unsigned halfword alternate */
4714 #ifndef TARGET_SPARC64
4717 if (!supervisor(dc
))
4721 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 2, 0);
4723 case 0x13: /* ldda, load double word alternate */
4724 #ifndef TARGET_SPARC64
4727 if (!supervisor(dc
))
4733 gen_ldda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4735 case 0x19: /* ldsba, load signed byte alternate */
4736 #ifndef TARGET_SPARC64
4739 if (!supervisor(dc
))
4743 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 1, 1);
4745 case 0x1a: /* ldsha, load signed halfword alternate */
4746 #ifndef TARGET_SPARC64
4749 if (!supervisor(dc
))
4753 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 2, 1);
4755 case 0x1d: /* ldstuba -- XXX: should be atomically */
4756 #ifndef TARGET_SPARC64
4759 if (!supervisor(dc
))
4763 gen_ldstub_asi(dc
, cpu_val
, cpu_addr
, insn
);
4765 case 0x1f: /* swapa, swap reg with alt. memory. Also
4767 CHECK_IU_FEATURE(dc
, SWAP
);
4768 #ifndef TARGET_SPARC64
4771 if (!supervisor(dc
))
4775 cpu_src1
= gen_load_gpr(dc
, rd
);
4776 gen_swap_asi(dc
, cpu_val
, cpu_src1
, cpu_addr
, insn
);
4779 #ifndef TARGET_SPARC64
4780 case 0x30: /* ldc */
4781 case 0x31: /* ldcsr */
4782 case 0x33: /* lddc */
4786 #ifdef TARGET_SPARC64
4787 case 0x08: /* V9 ldsw */
4788 gen_address_mask(dc
, cpu_addr
);
4789 tcg_gen_qemu_ld32s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4791 case 0x0b: /* V9 ldx */
4792 gen_address_mask(dc
, cpu_addr
);
4793 tcg_gen_qemu_ld64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4795 case 0x18: /* V9 ldswa */
4797 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 4, 1);
4799 case 0x1b: /* V9 ldxa */
4801 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, 8, 0);
4803 case 0x2d: /* V9 prefetch, no effect */
4805 case 0x30: /* V9 ldfa */
4806 if (gen_trap_ifnofpu(dc
)) {
4810 gen_ldf_asi(dc
, cpu_addr
, insn
, 4, rd
);
4811 gen_update_fprs_dirty(rd
);
4813 case 0x33: /* V9 lddfa */
4814 if (gen_trap_ifnofpu(dc
)) {
4818 gen_ldf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
4819 gen_update_fprs_dirty(DFPREG(rd
));
4821 case 0x3d: /* V9 prefetcha, no effect */
4823 case 0x32: /* V9 ldqfa */
4824 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4825 if (gen_trap_ifnofpu(dc
)) {
4829 gen_ldf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
4830 gen_update_fprs_dirty(QFPREG(rd
));
4836 gen_store_gpr(dc
, rd
, cpu_val
);
4837 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4840 } else if (xop
>= 0x20 && xop
< 0x24) {
4843 if (gen_trap_ifnofpu(dc
)) {
4848 case 0x20: /* ldf, load fpreg */
4849 gen_address_mask(dc
, cpu_addr
);
4850 t0
= get_temp_tl(dc
);
4851 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4852 cpu_dst_32
= gen_dest_fpr_F(dc
);
4853 tcg_gen_trunc_tl_i32(cpu_dst_32
, t0
);
4854 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4856 case 0x21: /* ldfsr, V9 ldxfsr */
4857 #ifdef TARGET_SPARC64
4858 gen_address_mask(dc
, cpu_addr
);
4860 TCGv_i64 t64
= tcg_temp_new_i64();
4861 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4862 gen_helper_ldxfsr(cpu_env
, t64
);
4863 tcg_temp_free_i64(t64
);
4867 cpu_dst_32
= get_temp_i32(dc
);
4868 t0
= get_temp_tl(dc
);
4869 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4870 tcg_gen_trunc_tl_i32(cpu_dst_32
, t0
);
4871 gen_helper_ldfsr(cpu_env
, cpu_dst_32
);
4873 case 0x22: /* ldqf, load quad fpreg */
4877 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4878 r_const
= tcg_const_i32(dc
->mem_idx
);
4879 gen_address_mask(dc
, cpu_addr
);
4880 gen_helper_ldqf(cpu_env
, cpu_addr
, r_const
);
4881 tcg_temp_free_i32(r_const
);
4882 gen_op_store_QT0_fpr(QFPREG(rd
));
4883 gen_update_fprs_dirty(QFPREG(rd
));
4886 case 0x23: /* lddf, load double fpreg */
4887 gen_address_mask(dc
, cpu_addr
);
4888 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4889 tcg_gen_qemu_ld64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
);
4890 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4895 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4896 xop
== 0xe || xop
== 0x1e) {
4897 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
4900 case 0x4: /* st, store word */
4901 gen_address_mask(dc
, cpu_addr
);
4902 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4904 case 0x5: /* stb, store byte */
4905 gen_address_mask(dc
, cpu_addr
);
4906 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4908 case 0x6: /* sth, store halfword */
4909 gen_address_mask(dc
, cpu_addr
);
4910 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4912 case 0x7: /* std, store double word */
4921 gen_address_mask(dc
, cpu_addr
);
4922 r_const
= tcg_const_i32(7);
4923 /* XXX remove alignment check */
4924 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4925 tcg_temp_free_i32(r_const
);
4926 lo
= gen_load_gpr(dc
, rd
+ 1);
4928 t64
= tcg_temp_new_i64();
4929 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
4930 tcg_gen_qemu_st64(t64
, cpu_addr
, dc
->mem_idx
);
4931 tcg_temp_free_i64(t64
);
4934 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4935 case 0x14: /* sta, V9 stwa, store word alternate */
4936 #ifndef TARGET_SPARC64
4939 if (!supervisor(dc
))
4943 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, 4);
4944 dc
->npc
= DYNAMIC_PC
;
4946 case 0x15: /* stba, store byte alternate */
4947 #ifndef TARGET_SPARC64
4950 if (!supervisor(dc
))
4954 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, 1);
4955 dc
->npc
= DYNAMIC_PC
;
4957 case 0x16: /* stha, store halfword alternate */
4958 #ifndef TARGET_SPARC64
4961 if (!supervisor(dc
))
4965 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, 2);
4966 dc
->npc
= DYNAMIC_PC
;
4968 case 0x17: /* stda, store double word alternate */
4969 #ifndef TARGET_SPARC64
4972 if (!supervisor(dc
))
4979 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4983 #ifdef TARGET_SPARC64
4984 case 0x0e: /* V9 stx */
4985 gen_address_mask(dc
, cpu_addr
);
4986 tcg_gen_qemu_st64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4988 case 0x1e: /* V9 stxa */
4990 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, 8);
4991 dc
->npc
= DYNAMIC_PC
;
4997 } else if (xop
> 0x23 && xop
< 0x28) {
4998 if (gen_trap_ifnofpu(dc
)) {
5003 case 0x24: /* stf, store fpreg */
5005 TCGv t
= get_temp_tl(dc
);
5006 gen_address_mask(dc
, cpu_addr
);
5007 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5008 tcg_gen_ext_i32_tl(t
, cpu_src1_32
);
5009 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5012 case 0x25: /* stfsr, V9 stxfsr */
5014 TCGv t
= get_temp_tl(dc
);
5016 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUSPARCState
, fsr
));
5017 #ifdef TARGET_SPARC64
5018 gen_address_mask(dc
, cpu_addr
);
5020 tcg_gen_qemu_st64(t
, cpu_addr
, dc
->mem_idx
);
5024 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5028 #ifdef TARGET_SPARC64
5029 /* V9 stqf, store quad fpreg */
5033 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5034 gen_op_load_fpr_QT0(QFPREG(rd
));
5035 r_const
= tcg_const_i32(dc
->mem_idx
);
5036 gen_address_mask(dc
, cpu_addr
);
5037 gen_helper_stqf(cpu_env
, cpu_addr
, r_const
);
5038 tcg_temp_free_i32(r_const
);
#else /* !TARGET_SPARC64 */
            /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
            goto illegal_insn;
#else
            if (!supervisor(dc))
                goto priv_insn;
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            goto nfq_insn;
#endif
#endif
        case 0x27: /* stdf, store double fpreg */
            gen_address_mask(dc, cpu_addr);
            cpu_src1_64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
            break;
        default:
            goto illegal_insn;
        }
    } else if (xop > 0x33 && xop < 0x3f) {
        switch (xop) {
#ifdef TARGET_SPARC64
        case 0x34: /* V9 stfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(dc, cpu_addr, insn, 4, rd);
            break;
        case 0x36: /* V9 stqfa */
            {
                TCGv_i32 r_const;

                CHECK_FPU_FEATURE(dc, FLOAT128);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                r_const = tcg_const_i32(7);
                gen_helper_check_align(cpu_env, cpu_addr, r_const);
                tcg_temp_free_i32(r_const);
                gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
            }
            break;
        case 0x37: /* V9 stdfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
            break;
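        /* casxa/casa implement SPARC's atomic compare-and-swap: rs2 supplies
           the comparison value, rd supplies the store data and receives the
           old memory word; the memory operation itself is delegated to
           gen_casx_asi()/gen_cas_asi(). */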
        case 0x3e: /* V9 casxa */
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
#else
        case 0x34: /* stc */
        case 0x35: /* stcsr */
        case 0x36: /* stdcq */
        case 0x37: /* stdc */
            goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
            CHECK_IU_FEATURE(dc, CASA);
            if (IS_IMM) {
                goto illegal_insn;
            }
            /* LEON3 allows CASA from user space with ASI 0xa */
            if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
                goto priv_insn;
            }
#endif
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
#endif
        default:
            goto illegal_insn;
        }
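    /* SPARC has a delayed-branch pipeline, so the translator tracks two
       program counters: dc->pc (the instruction just translated) and dc->npc
       (its architectural successor).  The code below advances that pair for
       ordinary fall-through instructions; branches have already rewritten
       npc, possibly to the symbolic DYNAMIC_PC/JUMP_PC markers. */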
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    goto egress;
 unimp_flush:
    gen_exception(dc, TT_UNIMP_FLUSH);
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    goto egress;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
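
/* Translate one guest translation block: decode SPARC instructions starting
   at tb->pc and emit TCG ops until a branch, a page boundary, the op-buffer
   limit or the configured instruction budget stops the loop. */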
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    unsigned int insn;
    int num_insns;
    int max_insns;
    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
#ifdef TARGET_SPARC64
    dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
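        /* Each iteration records the (pc, npc) pair for this instruction via
           tcg_gen_insn_start() so that restore_state_to_opc() can rebuild
           the architectural state after a fault, then fetches and translates
           one instruction. */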
        if (dc->npc & JUMP_PC) {
            assert(dc->jump_pc[1] == dc->pc + 4);
            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
        } else {
            tcg_gen_insn_start(dc->pc, dc->npc);
        }
        num_insns++;
        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);
        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
    }
}
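
/* Allocate the TCG globals that back the SPARC register file and control
   registers.  Every entry in the r32/rtl tables below becomes a named TCG
   global pointing into CPUSPARCState, so generated code can read and write
   the fields directly. */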
void gen_intermediate_code_init(CPUSPARCState *env)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };
    unsigned int i;
    /* init various static tables */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");
    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }
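    /* %g0 always reads as zero on SPARC, so it gets no backing storage and
       cpu_regs[0] stays unused.  The globals %g1-%g7 live directly in
       CPUSPARCState, while the windowed registers %o0-%i7 are addressed
       through cpu_regwptr, which the rest of the CPU code repoints as the
       register window moves. */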
    TCGV_UNUSED(cpu_regs[0]);
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
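
/* Called when unwinding after a fault: data[] holds the (pc, npc) pair that
   tcg_gen_insn_start() recorded for the faulting instruction, so pc/npc (and,
   in the JUMP_PC case, the pending conditional-branch target) can be
   reconstructed. */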
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;