/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
#ifndef CONFIG_USER_ONLY
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    uint32_t cc_op;  /* current CC operation */
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
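/*
 * Worked example of the two bit orders: the Bicc condition field occupies
 * instruction bits [28:25].  In the MSB-first numbering used by GET_FIELD
 * that is FROM = 3, TO = 6, so the decoder extracts it as
 * GET_FIELD(insn, 3, 6); the equivalent LSB-first form is
 * GET_FIELD_SP(insn, 25, 28), which expands to exactly the same expression.
 */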
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = get_temp_i32(dc);
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    tcg_temp_free_i64(t);
    gen_update_fprs_dirty(dc, dst);
}
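/*
 * Layout note for the two helpers above: each element of cpu_fpr[] holds a
 * pair of single-precision registers, with the even-numbered register in
 * bits [63:32] and the odd-numbered one in bits [31:0].  That is why the
 * load picks extrh/extrl by the parity of src and the store deposits at
 * offset (dst & 1 ? 0 : 32).
 */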
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
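/*
 * With the SPARC64 address mask in effect (PSTATE.AM, tracked here as
 * address_mask_32bit), virtual addresses are truncated to 32 bits.  The
 * memory accesses below run their addresses through gen_address_mask() so
 * the truncation happens in one place.
 */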
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        return cpu_regs[reg];
    }
    return get_temp_tl(dc);
}
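/*
 * Register %g0 reads as zero and ignores writes, which is why
 * gen_load_gpr() hands back a fresh zero temporary for reg 0 and
 * gen_store_gpr() silently drops stores to it.
 */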
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(NULL, 0);
    }
}
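/*
 * The two arms above correspond to TCG's two ways of ending a block:
 * goto_tb/exit_tb(tb, tb_num) emits a patchable direct jump so the next TB
 * can be chained in, while exit_tb(NULL, 0) always returns to the main
 * loop, which looks the target up again.
 */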
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
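/*
 * The setcond above relies on the usual unsigned-overflow identity: after
 * dst = src1 + src2, the carry out is (dst < src1) as an unsigned compare.
 * For example 0xffffffff + 0x2 wraps to 0x1, and 0x1 < 0xffffffff, so the
 * computed carry is 1.
 */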
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
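/*
 * MULScc implements one step of the V8 multiply-step algorithm: the addend
 * is src2 or zero depending on Y<0>, Y is shifted right with the low bit
 * of src1 inserted at the top, src1 is shifted right with (N ^ V) inserted
 * at the top, and the final add produces the value the condition codes are
 * later derived from.
 */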
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
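/*
 * UMUL/SMUL compute a 64-bit product and copy its high 32 bits into Y:
 * with a 32-bit guest the mulu2/muls2 ops produce both halves at once,
 * while with a 64-bit guest the product is formed in 64 bits and Y is
 * filled by the shift by 32 above.
 */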
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
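/*
 * fcc_offset selects which of the FSR's FCC fields is examined: 0 for fcc0
 * and, on SPARC64, the offsets of fcc1..fcc3.  FSR_FCC0_SHIFT and
 * FSR_FCC1_SHIFT are the positions of the field's two bits, so each helper
 * returns a single 0/1 value.
 */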
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);

    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
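/*
 * SPARC branches have a delay slot, so the translator tracks a (pc, npc)
 * pair rather than a single program counter: stepping one instruction, as
 * above, copies npc into pc and advances npc by 4, and taken branches only
 * redirect npc.
 */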
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
1099 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1102 static int subcc_cond
[16] = {
1118 -1, /* no overflow */
    static int logic_cond[16] = {
        TCG_COND_NEVER,  /* never */
        TCG_COND_EQ,     /* eq: Z */
        TCG_COND_LE,     /* le: Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt: N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs: V -> 0 */
        TCG_COND_ALWAYS, /* always */
        TCG_COND_NE,     /* ne: !Z */
        TCG_COND_GT,     /* gt: !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge: !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc: !V -> 1 */
    };
1143 #ifdef TARGET_SPARC64
1153 switch (dc
->cc_op
) {
1155 cmp
->cond
= logic_cond
[cond
];
1157 cmp
->is_bool
= false;
1159 cmp
->c2
= tcg_const_tl(0);
1160 #ifdef TARGET_SPARC64
1163 cmp
->c1
= tcg_temp_new();
1164 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1169 cmp
->c1
= cpu_cc_dst
;
1176 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1177 goto do_compare_dst_0
;
1179 case 7: /* overflow */
1180 case 15: /* !overflow */
1184 cmp
->cond
= subcc_cond
[cond
];
1185 cmp
->is_bool
= false;
1186 #ifdef TARGET_SPARC64
1188 /* Note that sign-extension works for unsigned compares as
1189 long as both operands are sign-extended. */
1190 cmp
->g1
= cmp
->g2
= false;
1191 cmp
->c1
= tcg_temp_new();
1192 cmp
->c2
= tcg_temp_new();
1193 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1194 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1198 cmp
->g1
= cmp
->g2
= true;
1199 cmp
->c1
= cpu_cc_src
;
1200 cmp
->c2
= cpu_cc_src2
;
1207 gen_helper_compute_psr(cpu_env
);
1208 dc
->cc_op
= CC_OP_FLAGS
;
1212 /* We're going to generate a boolean result. */
1213 cmp
->cond
= TCG_COND_NE
;
1214 cmp
->is_bool
= true;
1215 cmp
->g1
= cmp
->g2
= false;
1216 cmp
->c1
= r_dst
= tcg_temp_new();
1217 cmp
->c2
= tcg_const_tl(0);
1221 gen_op_eval_bn(r_dst
);
1224 gen_op_eval_be(r_dst
, r_src
);
1227 gen_op_eval_ble(r_dst
, r_src
);
1230 gen_op_eval_bl(r_dst
, r_src
);
1233 gen_op_eval_bleu(r_dst
, r_src
);
1236 gen_op_eval_bcs(r_dst
, r_src
);
1239 gen_op_eval_bneg(r_dst
, r_src
);
1242 gen_op_eval_bvs(r_dst
, r_src
);
1245 gen_op_eval_ba(r_dst
);
1248 gen_op_eval_bne(r_dst
, r_src
);
1251 gen_op_eval_bg(r_dst
, r_src
);
1254 gen_op_eval_bge(r_dst
, r_src
);
1257 gen_op_eval_bgu(r_dst
, r_src
);
1260 gen_op_eval_bcc(r_dst
, r_src
);
1263 gen_op_eval_bpos(r_dst
, r_src
);
1266 gen_op_eval_bvc(r_dst
, r_src
);
1273 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1275 unsigned int offset
;
1278 /* For now we still generate a straight boolean result. */
1279 cmp
->cond
= TCG_COND_NE
;
1280 cmp
->is_bool
= true;
1281 cmp
->g1
= cmp
->g2
= false;
1282 cmp
->c1
= r_dst
= tcg_temp_new();
1283 cmp
->c2
= tcg_const_tl(0);
1303 gen_op_eval_bn(r_dst
);
1306 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1309 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1312 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1315 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1318 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1321 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1324 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1327 gen_op_eval_ba(r_dst
);
1330 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1333 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1336 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1339 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1342 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1345 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1348 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1353 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1357 gen_compare(&cmp
, cc
, cond
, dc
);
1359 /* The interface is to return a boolean in r_dst. */
1361 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1363 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1369 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1372 gen_fcompare(&cmp
, cc
, cond
);
1374 /* The interface is to return a boolean in r_dst. */
1376 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1378 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1384 #ifdef TARGET_SPARC64
1386 static const int gen_tcg_cond_reg
[8] = {
1397 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1399 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1400 cmp
->is_bool
= false;
1404 cmp
->c2
= tcg_const_tl(0);
1407 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1410 gen_compare_reg(&cmp
, cond
, r_src
);
1412 /* The interface is to return a boolean in r_dst. */
1413 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1419 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1421 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1422 target_ulong target
= dc
->pc
+ offset
;
1424 #ifdef TARGET_SPARC64
1425 if (unlikely(AM_CHECK(dc
))) {
1426 target
&= 0xffffffffULL
;
1430 /* unconditional not taken */
1432 dc
->pc
= dc
->npc
+ 4;
1433 dc
->npc
= dc
->pc
+ 4;
1436 dc
->npc
= dc
->pc
+ 4;
1438 } else if (cond
== 0x8) {
1439 /* unconditional taken */
1442 dc
->npc
= dc
->pc
+ 4;
1446 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1450 gen_cond(cpu_cond
, cc
, cond
, dc
);
1452 gen_branch_a(dc
, target
);
1454 gen_branch_n(dc
, target
);
1459 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1461 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1462 target_ulong target
= dc
->pc
+ offset
;
1464 #ifdef TARGET_SPARC64
1465 if (unlikely(AM_CHECK(dc
))) {
1466 target
&= 0xffffffffULL
;
1470 /* unconditional not taken */
1472 dc
->pc
= dc
->npc
+ 4;
1473 dc
->npc
= dc
->pc
+ 4;
1476 dc
->npc
= dc
->pc
+ 4;
1478 } else if (cond
== 0x8) {
1479 /* unconditional taken */
1482 dc
->npc
= dc
->pc
+ 4;
1486 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1490 gen_fcond(cpu_cond
, cc
, cond
);
1492 gen_branch_a(dc
, target
);
1494 gen_branch_n(dc
, target
);
1499 #ifdef TARGET_SPARC64
1500 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1503 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1504 target_ulong target
= dc
->pc
+ offset
;
1506 if (unlikely(AM_CHECK(dc
))) {
1507 target
&= 0xffffffffULL
;
1510 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1512 gen_branch_a(dc
, target
);
1514 gen_branch_n(dc
, target
);
1518 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1522 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1525 gen_helper_fcmps_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1528 gen_helper_fcmps_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1531 gen_helper_fcmps_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1536 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1540 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1543 gen_helper_fcmpd_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1546 gen_helper_fcmpd_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1549 gen_helper_fcmpd_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1554 static inline void gen_op_fcmpq(int fccno
)
1558 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1561 gen_helper_fcmpq_fcc1(cpu_fsr
, cpu_env
);
1564 gen_helper_fcmpq_fcc2(cpu_fsr
, cpu_env
);
1567 gen_helper_fcmpq_fcc3(cpu_fsr
, cpu_env
);
1572 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1576 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1579 gen_helper_fcmpes_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1582 gen_helper_fcmpes_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1585 gen_helper_fcmpes_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1590 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1594 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1597 gen_helper_fcmped_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1600 gen_helper_fcmped_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1603 gen_helper_fcmped_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1608 static inline void gen_op_fcmpeq(int fccno
)
1612 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1615 gen_helper_fcmpeq_fcc1(cpu_fsr
, cpu_env
);
1618 gen_helper_fcmpeq_fcc2(cpu_fsr
, cpu_env
);
1621 gen_helper_fcmpeq_fcc3(cpu_fsr
, cpu_env
);
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1682 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1683 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1687 src
= gen_load_fpr_F(dc
, rs
);
1688 dst
= gen_dest_fpr_F(dc
);
1690 gen(dst
, cpu_env
, src
);
1691 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1693 gen_store_fpr_F(dc
, rd
, dst
);
1696 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1697 void (*gen
)(TCGv_i32
, TCGv_i32
))
1701 src
= gen_load_fpr_F(dc
, rs
);
1702 dst
= gen_dest_fpr_F(dc
);
1706 gen_store_fpr_F(dc
, rd
, dst
);
1709 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1710 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1712 TCGv_i32 dst
, src1
, src2
;
1714 src1
= gen_load_fpr_F(dc
, rs1
);
1715 src2
= gen_load_fpr_F(dc
, rs2
);
1716 dst
= gen_dest_fpr_F(dc
);
1718 gen(dst
, cpu_env
, src1
, src2
);
1719 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1721 gen_store_fpr_F(dc
, rd
, dst
);
1724 #ifdef TARGET_SPARC64
1725 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1726 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1728 TCGv_i32 dst
, src1
, src2
;
1730 src1
= gen_load_fpr_F(dc
, rs1
);
1731 src2
= gen_load_fpr_F(dc
, rs2
);
1732 dst
= gen_dest_fpr_F(dc
);
1734 gen(dst
, src1
, src2
);
1736 gen_store_fpr_F(dc
, rd
, dst
);
1740 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1741 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1745 src
= gen_load_fpr_D(dc
, rs
);
1746 dst
= gen_dest_fpr_D(dc
, rd
);
1748 gen(dst
, cpu_env
, src
);
1749 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1751 gen_store_fpr_D(dc
, rd
, dst
);
1754 #ifdef TARGET_SPARC64
1755 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1756 void (*gen
)(TCGv_i64
, TCGv_i64
))
1760 src
= gen_load_fpr_D(dc
, rs
);
1761 dst
= gen_dest_fpr_D(dc
, rd
);
1765 gen_store_fpr_D(dc
, rd
, dst
);
1769 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1770 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1772 TCGv_i64 dst
, src1
, src2
;
1774 src1
= gen_load_fpr_D(dc
, rs1
);
1775 src2
= gen_load_fpr_D(dc
, rs2
);
1776 dst
= gen_dest_fpr_D(dc
, rd
);
1778 gen(dst
, cpu_env
, src1
, src2
);
1779 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1781 gen_store_fpr_D(dc
, rd
, dst
);
1784 #ifdef TARGET_SPARC64
1785 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1786 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1788 TCGv_i64 dst
, src1
, src2
;
1790 src1
= gen_load_fpr_D(dc
, rs1
);
1791 src2
= gen_load_fpr_D(dc
, rs2
);
1792 dst
= gen_dest_fpr_D(dc
, rd
);
1794 gen(dst
, src1
, src2
);
1796 gen_store_fpr_D(dc
, rd
, dst
);
1799 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1800 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1802 TCGv_i64 dst
, src1
, src2
;
1804 src1
= gen_load_fpr_D(dc
, rs1
);
1805 src2
= gen_load_fpr_D(dc
, rs2
);
1806 dst
= gen_dest_fpr_D(dc
, rd
);
1808 gen(dst
, cpu_gsr
, src1
, src2
);
1810 gen_store_fpr_D(dc
, rd
, dst
);
1813 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1814 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1816 TCGv_i64 dst
, src0
, src1
, src2
;
1818 src1
= gen_load_fpr_D(dc
, rs1
);
1819 src2
= gen_load_fpr_D(dc
, rs2
);
1820 src0
= gen_load_fpr_D(dc
, rd
);
1821 dst
= gen_dest_fpr_D(dc
, rd
);
1823 gen(dst
, src0
, src1
, src2
);
1825 gen_store_fpr_D(dc
, rd
, dst
);
1829 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1830 void (*gen
)(TCGv_ptr
))
1832 gen_op_load_fpr_QT1(QFPREG(rs
));
1835 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1837 gen_op_store_QT0_fpr(QFPREG(rd
));
1838 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1841 #ifdef TARGET_SPARC64
1842 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1843 void (*gen
)(TCGv_ptr
))
1845 gen_op_load_fpr_QT1(QFPREG(rs
));
1849 gen_op_store_QT0_fpr(QFPREG(rd
));
1850 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1854 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1855 void (*gen
)(TCGv_ptr
))
1857 gen_op_load_fpr_QT0(QFPREG(rs1
));
1858 gen_op_load_fpr_QT1(QFPREG(rs2
));
1861 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1863 gen_op_store_QT0_fpr(QFPREG(rd
));
1864 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1867 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1868 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1871 TCGv_i32 src1
, src2
;
1873 src1
= gen_load_fpr_F(dc
, rs1
);
1874 src2
= gen_load_fpr_F(dc
, rs2
);
1875 dst
= gen_dest_fpr_D(dc
, rd
);
1877 gen(dst
, cpu_env
, src1
, src2
);
1878 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1880 gen_store_fpr_D(dc
, rd
, dst
);
1883 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1884 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1886 TCGv_i64 src1
, src2
;
1888 src1
= gen_load_fpr_D(dc
, rs1
);
1889 src2
= gen_load_fpr_D(dc
, rs2
);
1891 gen(cpu_env
, src1
, src2
);
1892 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1894 gen_op_store_QT0_fpr(QFPREG(rd
));
1895 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1898 #ifdef TARGET_SPARC64
1899 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1900 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1905 src
= gen_load_fpr_F(dc
, rs
);
1906 dst
= gen_dest_fpr_D(dc
, rd
);
1908 gen(dst
, cpu_env
, src
);
1909 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1911 gen_store_fpr_D(dc
, rd
, dst
);
1915 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1916 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1921 src
= gen_load_fpr_F(dc
, rs
);
1922 dst
= gen_dest_fpr_D(dc
, rd
);
1924 gen(dst
, cpu_env
, src
);
1926 gen_store_fpr_D(dc
, rd
, dst
);
1929 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1930 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1935 src
= gen_load_fpr_D(dc
, rs
);
1936 dst
= gen_dest_fpr_F(dc
);
1938 gen(dst
, cpu_env
, src
);
1939 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1941 gen_store_fpr_F(dc
, rd
, dst
);
1944 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1945 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1949 gen_op_load_fpr_QT1(QFPREG(rs
));
1950 dst
= gen_dest_fpr_F(dc
);
1953 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1955 gen_store_fpr_F(dc
, rd
, dst
);
1958 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1959 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1963 gen_op_load_fpr_QT1(QFPREG(rs
));
1964 dst
= gen_dest_fpr_D(dc
, rd
);
1967 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1969 gen_store_fpr_D(dc
, rd
, dst
);
1972 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1973 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1977 src
= gen_load_fpr_F(dc
, rs
);
1981 gen_op_store_QT0_fpr(QFPREG(rd
));
1982 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1985 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1986 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1990 src
= gen_load_fpr_D(dc
, rs
);
1994 gen_op_store_QT0_fpr(QFPREG(rd
));
1995 gen_update_fprs_dirty(dc
, QFPREG(rd
));
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);

    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
}
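/*
 * LDSTUB atomically reads a byte and sets the memory location to 0xff,
 * which is the V8 locking primitive; modelling it as an atomic exchange
 * with the constant 0xff, as above, matches that behaviour.
 */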
2014 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2033 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
2035 int asi
= GET_FIELD(insn
, 19, 26);
2036 ASIType type
= GET_ASI_HELPER
;
2037 int mem_idx
= dc
->mem_idx
;
2039 #ifndef TARGET_SPARC64
2040 /* Before v9, all asis are immediate and privileged. */
2042 gen_exception(dc
, TT_ILL_INSN
);
2043 type
= GET_ASI_EXCP
;
2044 } else if (supervisor(dc
)
2045 /* Note that LEON accepts ASI_USERDATA in user mode, for
2046 use with CASA. Also note that previous versions of
2047 QEMU allowed (and old versions of gcc emitted) ASI_P
2048 for LEON, which is incorrect. */
2049 || (asi
== ASI_USERDATA
2050 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
2052 case ASI_USERDATA
: /* User data access */
2053 mem_idx
= MMU_USER_IDX
;
2054 type
= GET_ASI_DIRECT
;
2056 case ASI_KERNELDATA
: /* Supervisor data access */
2057 mem_idx
= MMU_KERNEL_IDX
;
2058 type
= GET_ASI_DIRECT
;
2060 case ASI_M_BYPASS
: /* MMU passthrough */
2061 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
2062 mem_idx
= MMU_PHYS_IDX
;
2063 type
= GET_ASI_DIRECT
;
2065 case ASI_M_BCOPY
: /* Block copy, sta access */
2066 mem_idx
= MMU_KERNEL_IDX
;
2067 type
= GET_ASI_BCOPY
;
2069 case ASI_M_BFILL
: /* Block fill, stda access */
2070 mem_idx
= MMU_KERNEL_IDX
;
2071 type
= GET_ASI_BFILL
;
2075 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
2076 * permissions check in get_physical_address(..).
2078 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
2080 gen_exception(dc
, TT_PRIV_INSN
);
2081 type
= GET_ASI_EXCP
;
2087 /* With v9, all asis below 0x80 are privileged. */
2088 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
2089 down that bit into DisasContext. For the moment that's ok,
2090 since the direct implementations below doesn't have any ASIs
2091 in the restricted [0x30, 0x7f] range, and the check will be
2092 done properly in the helper. */
2093 if (!supervisor(dc
) && asi
< 0x80) {
2094 gen_exception(dc
, TT_PRIV_ACT
);
2095 type
= GET_ASI_EXCP
;
2098 case ASI_REAL
: /* Bypass */
2099 case ASI_REAL_IO
: /* Bypass, non-cacheable */
2100 case ASI_REAL_L
: /* Bypass LE */
2101 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
2102 case ASI_TWINX_REAL
: /* Real address, twinx */
2103 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
2104 case ASI_QUAD_LDD_PHYS
:
2105 case ASI_QUAD_LDD_PHYS_L
:
2106 mem_idx
= MMU_PHYS_IDX
;
2108 case ASI_N
: /* Nucleus */
2109 case ASI_NL
: /* Nucleus LE */
2112 case ASI_NUCLEUS_QUAD_LDD
:
2113 case ASI_NUCLEUS_QUAD_LDD_L
:
2114 if (hypervisor(dc
)) {
2115 mem_idx
= MMU_PHYS_IDX
;
2117 mem_idx
= MMU_NUCLEUS_IDX
;
2120 case ASI_AIUP
: /* As if user primary */
2121 case ASI_AIUPL
: /* As if user primary LE */
2122 case ASI_TWINX_AIUP
:
2123 case ASI_TWINX_AIUP_L
:
2124 case ASI_BLK_AIUP_4V
:
2125 case ASI_BLK_AIUP_L_4V
:
2128 mem_idx
= MMU_USER_IDX
;
2130 case ASI_AIUS
: /* As if user secondary */
2131 case ASI_AIUSL
: /* As if user secondary LE */
2132 case ASI_TWINX_AIUS
:
2133 case ASI_TWINX_AIUS_L
:
2134 case ASI_BLK_AIUS_4V
:
2135 case ASI_BLK_AIUS_L_4V
:
2138 mem_idx
= MMU_USER_SECONDARY_IDX
;
2140 case ASI_S
: /* Secondary */
2141 case ASI_SL
: /* Secondary LE */
2144 case ASI_BLK_COMMIT_S
:
2151 if (mem_idx
== MMU_USER_IDX
) {
2152 mem_idx
= MMU_USER_SECONDARY_IDX
;
2153 } else if (mem_idx
== MMU_KERNEL_IDX
) {
2154 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
2157 case ASI_P
: /* Primary */
2158 case ASI_PL
: /* Primary LE */
2161 case ASI_BLK_COMMIT_P
:
2185 type
= GET_ASI_DIRECT
;
2187 case ASI_TWINX_REAL
:
2188 case ASI_TWINX_REAL_L
:
2191 case ASI_TWINX_AIUP
:
2192 case ASI_TWINX_AIUP_L
:
2193 case ASI_TWINX_AIUS
:
2194 case ASI_TWINX_AIUS_L
:
2199 case ASI_QUAD_LDD_PHYS
:
2200 case ASI_QUAD_LDD_PHYS_L
:
2201 case ASI_NUCLEUS_QUAD_LDD
:
2202 case ASI_NUCLEUS_QUAD_LDD_L
:
2203 type
= GET_ASI_DTWINX
;
2205 case ASI_BLK_COMMIT_P
:
2206 case ASI_BLK_COMMIT_S
:
2207 case ASI_BLK_AIUP_4V
:
2208 case ASI_BLK_AIUP_L_4V
:
2211 case ASI_BLK_AIUS_4V
:
2212 case ASI_BLK_AIUS_L_4V
:
2219 type
= GET_ASI_BLOCK
;
2226 type
= GET_ASI_SHORT
;
2233 type
= GET_ASI_SHORT
;
2236 /* The little-endian asis all have bit 3 set. */
2243 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
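/*
 * An ASI (address space identifier) chosen by a load/store-alternate
 * instruction picks the address space and access rules.  get_asi() above
 * folds the common cases into a (type, asi, mem_idx, memop) tuple so they
 * can be emitted as ordinary TCG memory operations; everything else falls
 * back to the ld_asi/st_asi helpers.
 */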
2246 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2247 int insn
, MemOp memop
)
2249 DisasASI da
= get_asi(dc
, insn
, memop
);
2254 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2255 gen_exception(dc
, TT_ILL_INSN
);
2257 case GET_ASI_DIRECT
:
2258 gen_address_mask(dc
, addr
);
2259 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
);
2263 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2264 TCGv_i32 r_mop
= tcg_const_i32(memop
);
2267 #ifdef TARGET_SPARC64
2268 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_mop
);
2271 TCGv_i64 t64
= tcg_temp_new_i64();
2272 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2273 tcg_gen_trunc_i64_tl(dst
, t64
);
2274 tcg_temp_free_i64(t64
);
2277 tcg_temp_free_i32(r_mop
);
2278 tcg_temp_free_i32(r_asi
);
2284 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2285 int insn
, MemOp memop
)
2287 DisasASI da
= get_asi(dc
, insn
, memop
);
2292 case GET_ASI_DTWINX
: /* Reserved for stda. */
2293 #ifndef TARGET_SPARC64
2294 gen_exception(dc
, TT_ILL_INSN
);
2297 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2298 /* Pre OpenSPARC CPUs don't have these */
2299 gen_exception(dc
, TT_ILL_INSN
);
2302 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2303 * are ST_BLKINIT_ ASIs */
2306 case GET_ASI_DIRECT
:
2307 gen_address_mask(dc
, addr
);
2308 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
);
2310 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2312 /* Copy 32 bytes from the address in SRC to ADDR. */
2313 /* ??? The original qemu code suggests 4-byte alignment, dropping
2314 the low bits, but the only place I can see this used is in the
2315 Linux kernel with 32 byte alignment, which would make more sense
2316 as a cacheline-style operation. */
2318 TCGv saddr
= tcg_temp_new();
2319 TCGv daddr
= tcg_temp_new();
2320 TCGv four
= tcg_const_tl(4);
2321 TCGv_i32 tmp
= tcg_temp_new_i32();
2324 tcg_gen_andi_tl(saddr
, src
, -4);
2325 tcg_gen_andi_tl(daddr
, addr
, -4);
2326 for (i
= 0; i
< 32; i
+= 4) {
2327 /* Since the loads and stores are paired, allow the
2328 copy to happen in the host endianness. */
2329 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2330 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2331 tcg_gen_add_tl(saddr
, saddr
, four
);
2332 tcg_gen_add_tl(daddr
, daddr
, four
);
2335 tcg_temp_free(saddr
);
2336 tcg_temp_free(daddr
);
2337 tcg_temp_free(four
);
2338 tcg_temp_free_i32(tmp
);
2344 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2345 TCGv_i32 r_mop
= tcg_const_i32(memop
& MO_SIZE
);
2348 #ifdef TARGET_SPARC64
2349 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_mop
);
2352 TCGv_i64 t64
= tcg_temp_new_i64();
2353 tcg_gen_extu_tl_i64(t64
, src
);
2354 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2355 tcg_temp_free_i64(t64
);
2358 tcg_temp_free_i32(r_mop
);
2359 tcg_temp_free_i32(r_asi
);
2361 /* A write to a TLB register may alter page maps. End the TB. */
2362 dc
->npc
= DYNAMIC_PC
;
2368 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2369 TCGv addr
, int insn
)
2371 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2376 case GET_ASI_DIRECT
:
2377 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2380 /* ??? Should be DAE_invalid_asi. */
2381 gen_exception(dc
, TT_DATA_ACCESS
);
2386 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2389 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2395 case GET_ASI_DIRECT
:
2396 oldv
= tcg_temp_new();
2397 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2398 da
.mem_idx
, da
.memop
);
2399 gen_store_gpr(dc
, rd
, oldv
);
2400 tcg_temp_free(oldv
);
2403 /* ??? Should be DAE_invalid_asi. */
2404 gen_exception(dc
, TT_DATA_ACCESS
);
2409 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2411 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2416 case GET_ASI_DIRECT
:
2417 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2420 /* ??? In theory, this should be raise DAE_invalid_asi.
2421 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2422 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2423 gen_helper_exit_atomic(cpu_env
);
2425 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2426 TCGv_i32 r_mop
= tcg_const_i32(MO_UB
);
2430 t64
= tcg_temp_new_i64();
2431 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2433 s64
= tcg_const_i64(0xff);
2434 gen_helper_st_asi(cpu_env
, addr
, s64
, r_asi
, r_mop
);
2435 tcg_temp_free_i64(s64
);
2436 tcg_temp_free_i32(r_mop
);
2437 tcg_temp_free_i32(r_asi
);
2439 tcg_gen_trunc_i64_tl(dst
, t64
);
2440 tcg_temp_free_i64(t64
);
2443 dc
->npc
= DYNAMIC_PC
;
2450 #ifdef TARGET_SPARC64
2451 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2452 int insn
, int size
, int rd
)
2454 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2462 case GET_ASI_DIRECT
:
2463 gen_address_mask(dc
, addr
);
2466 d32
= gen_dest_fpr_F(dc
);
2467 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
);
2468 gen_store_fpr_F(dc
, rd
, d32
);
2471 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2472 da
.memop
| MO_ALIGN_4
);
2475 d64
= tcg_temp_new_i64();
2476 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2477 tcg_gen_addi_tl(addr
, addr
, 8);
2478 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2479 da
.memop
| MO_ALIGN_4
);
2480 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2481 tcg_temp_free_i64(d64
);
2484 g_assert_not_reached();
2489 /* Valid for lddfa on aligned registers only. */
2490 if (size
== 8 && (rd
& 7) == 0) {
2495 gen_address_mask(dc
, addr
);
2497 /* The first operation checks required alignment. */
2498 memop
= da
.memop
| MO_ALIGN_64
;
2499 eight
= tcg_const_tl(8);
2500 for (i
= 0; ; ++i
) {
2501 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2506 tcg_gen_add_tl(addr
, addr
, eight
);
2509 tcg_temp_free(eight
);
2511 gen_exception(dc
, TT_ILL_INSN
);
2516 /* Valid for lddfa only. */
2518 gen_address_mask(dc
, addr
);
2519 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
, da
.memop
);
2521 gen_exception(dc
, TT_ILL_INSN
);
2527 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2528 TCGv_i32 r_mop
= tcg_const_i32(da
.memop
);
2531 /* According to the table in the UA2011 manual, the only
2532 other asis that are valid for ldfa/lddfa/ldqfa are
2533 the NO_FAULT asis. We still need a helper for these,
2534 but we can just use the integer asi helper for them. */
2537 d64
= tcg_temp_new_i64();
2538 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2539 d32
= gen_dest_fpr_F(dc
);
2540 tcg_gen_extrl_i64_i32(d32
, d64
);
2541 tcg_temp_free_i64(d64
);
2542 gen_store_fpr_F(dc
, rd
, d32
);
2545 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], cpu_env
, addr
, r_asi
, r_mop
);
2548 d64
= tcg_temp_new_i64();
2549 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2550 tcg_gen_addi_tl(addr
, addr
, 8);
2551 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], cpu_env
, addr
, r_asi
, r_mop
);
2552 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2553 tcg_temp_free_i64(d64
);
2556 g_assert_not_reached();
2558 tcg_temp_free_i32(r_mop
);
2559 tcg_temp_free_i32(r_asi
);
2565 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2566 int insn
, int size
, int rd
)
2568 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2575 case GET_ASI_DIRECT
:
2576 gen_address_mask(dc
, addr
);
2579 d32
= gen_load_fpr_F(dc
, rd
);
2580 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
);
2583 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2584 da
.memop
| MO_ALIGN_4
);
2587 /* Only 4-byte alignment required. However, it is legal for the
2588 cpu to signal the alignment fault, and the OS trap handler is
2589 required to fix it up. Requiring 16-byte alignment here avoids
2590 having to probe the second page before performing the first
2592 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2593 da
.memop
| MO_ALIGN_16
);
2594 tcg_gen_addi_tl(addr
, addr
, 8);
2595 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2598 g_assert_not_reached();
2603 /* Valid for stdfa on aligned registers only. */
2604 if (size
== 8 && (rd
& 7) == 0) {
2609 gen_address_mask(dc
, addr
);
2611 /* The first operation checks required alignment. */
2612 memop
= da
.memop
| MO_ALIGN_64
;
2613 eight
= tcg_const_tl(8);
2614 for (i
= 0; ; ++i
) {
2615 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2620 tcg_gen_add_tl(addr
, addr
, eight
);
2623 tcg_temp_free(eight
);
2625 gen_exception(dc
, TT_ILL_INSN
);
2630 /* Valid for stdfa only. */
2632 gen_address_mask(dc
, addr
);
2633 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
, da
.memop
);
2635 gen_exception(dc
, TT_ILL_INSN
);
2640 /* According to the table in the UA2011 manual, the only
2641 other asis that are valid for ldfa/lddfa/ldqfa are
2642 the PST* asis, which aren't currently handled. */
2643 gen_exception(dc
, TT_ILL_INSN
);
2648 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2650 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2651 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2652 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2658 case GET_ASI_DTWINX
:
2659 gen_address_mask(dc
, addr
);
2660 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2661 tcg_gen_addi_tl(addr
, addr
, 8);
2662 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2665 case GET_ASI_DIRECT
:
2667 TCGv_i64 tmp
= tcg_temp_new_i64();
2669 gen_address_mask(dc
, addr
);
2670 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
);
2672 /* Note that LE ldda acts as if each 32-bit register
2673 result is byte swapped. Having just performed one
2674 64-bit bswap, we need now to swap the writebacks. */
2675 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2676 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2678 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2680 tcg_temp_free_i64(tmp
);
2685 /* ??? In theory we've handled all of the ASIs that are valid
2686 for ldda, and this should raise DAE_invalid_asi. However,
2687 real hardware allows others. This can be seen with e.g.
2688 FreeBSD 10.3 wrt ASI_IC_TAG. */
2690 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2691 TCGv_i32 r_mop
= tcg_const_i32(da
.memop
);
2692 TCGv_i64 tmp
= tcg_temp_new_i64();
2695 gen_helper_ld_asi(tmp
, cpu_env
, addr
, r_asi
, r_mop
);
2696 tcg_temp_free_i32(r_asi
);
2697 tcg_temp_free_i32(r_mop
);
2700 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2701 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2703 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2705 tcg_temp_free_i64(tmp
);
2710 gen_store_gpr(dc
, rd
, hi
);
2711 gen_store_gpr(dc
, rd
+ 1, lo
);
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
            tcg_temp_free_i64(t64);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }

            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
            tcg_temp_free_i64(t64);
        }
        break;
    }
}

static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop);
        gen_store_gpr(dc, rd, oldv);
        tcg_temp_free(oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
#elif !defined(CONFIG_USER_ONLY)
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEUQ);

    switch (da.type) {
    case GET_ASI_EXCP:
        tcg_temp_free_i64(t64);
        return;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UQ);

            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}

static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
        break;

        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_const_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }

            tcg_temp_free(d_addr);
            tcg_temp_free(eight);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UQ);

            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_temp_free_i64(t64);
}
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
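
/* Added note on operand decoding: IS_IMM tests bit 13 of the instruction,
 * which selects between the i-type and r-type formats.  get_src2() therefore
 * either materialises the sign-extended 13-bit simm13 field
 * (GET_FIELDs(insn, 19, 31), range -4096..4095) in a temporary, or reads
 * register rs2 from bits 4:0 (GET_FIELD(insn, 27, 31) in the inverted bit
 * numbering used by these macros).
 */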
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
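
/* Added note: folding the comparison down to 32 bits is safe here because
 * c32 ends up holding either an existing 0/1 boolean value or the 0/1 result
 * of setcond_i64, so truncating it cannot change whether the TCG_COND_NE
 * test in the movcond above fires.  (That the first branch only runs when
 * the comparison is already a boolean is an assumption about the elided
 * condition around it.)
 */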
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
}

#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
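
    /* Added worked example: for edge8 the selectors are assumed to be
       imask = 0x7, shift = 3 and omask = 0xff (they are set by the elided
       switch on width).  If the low three bits of s1 are 2, then
       index = 2 << 3 = 16 and the left-edge value is
       (0x80c0e0f0f8fcfeffULL >> 16) & 0xff = 0xfc.  */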
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;

            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;

            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;

            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

        amask &= 0xffffffffULL;

    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2).  */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
}
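
/* Added note: alignaddr writes the 8-byte-aligned sum into dst and records
 * the alignment remainder in GSR.align (bits 2:0) via the deposit above.
 * For example, s1 + s2 = 0x1005 yields dst = 0x1000 and GSR.align = 5; the
 * "left" variant deposits the low bits of the negated sum instead.
 */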
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(shift);
}
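
/* Added worked example: with GSR.align = 3 the shift becomes 24, so
 * dst = (s1 << 24) | (s2 >> 40).  The right shift is evaluated as
 * (s2 >> (24 ^ 63)) >> 1 = (s2 >> 39) >> 1, which also yields the correct
 * all-zero contribution when GSR.align = 0, where a single shift by 64
 * would be undefined at the TCG level.
 */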
#define CHECK_IU_FEATURE(dc, FEATURE)                              \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))          \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                             \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))          \
        goto nfpu_insn;
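
/* Added note: both macros expand to a plain if/goto (the goto targets shown
 * above are the error labels assumed to live in disas_sparc_insn(), so the
 * macros can only be used there).  A typical use is
 * CHECK_FPU_FEATURE(dc, VIS1) at the top of a VIS opcode case to bail out on
 * CPUs without that feature.
 */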
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext *dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    case 0: /* branches/sethi */
    {
        unsigned int xop = GET_FIELD(insn, 7, 9);

#ifdef TARGET_SPARC64
        case 0x1: /* V9 BPcc */
            target = GET_FIELD_SP(insn, 0, 18);
            target = sign_extend(target, 19);

            cc = GET_FIELD_SP(insn, 20, 21);
            if (cc == 0) {
                do_branch(dc, target, insn, 0);
            } else if (cc == 2) {
                do_branch(dc, target, insn, 1);
            }

        case 0x3: /* V9 BPr */
            target = GET_FIELD_SP(insn, 0, 13) |
                     (GET_FIELD_SP(insn, 20, 21) << 14);
            target = sign_extend(target, 16);

            cpu_src1 = get_src1(dc, insn);
            do_branch_reg(dc, target, insn, cpu_src1);

        case 0x5: /* V9 FBPcc */
        {
            int cc = GET_FIELD_SP(insn, 20, 21);
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            target = GET_FIELD_SP(insn, 0, 18);
            target = sign_extend(target, 19);

            do_fbranch(dc, target, insn, cc);
        }

        case 0x7: /* CBN+x */

        case 0x2: /* BN+x */
            target = GET_FIELD(insn, 10, 31);
            target = sign_extend(target, 22);

            do_branch(dc, target, insn, 0);

        case 0x6: /* FBN+x */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            target = GET_FIELD(insn, 10, 31);
            target = sign_extend(target, 22);

            do_fbranch(dc, target, insn, 0);

        case 0x4: /* SETHI */
            /* Special-case %g0 because that's the canonical nop.  */
            if (rd) {
                uint32_t value = GET_FIELD(insn, 10, 31);
                TCGv t = gen_dest_gpr(dc, rd);
                tcg_gen_movi_tl(t, value << 10);
                gen_store_gpr(dc, rd, t);
            }
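
            /* Added example: SETHI places the 22-bit immediate into bits
               31:10 of rd, e.g. a field of 0x3fffff stores 0xfffffc00.  */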
        case 0x0: /* UNIMPL */
        default:
            goto illegal_insn;
        }
    }
    break;
    case 1: /* CALL */
    {
        target_long target = GET_FIELDs(insn, 2, 31) << 2;
        TCGv o7 = gen_dest_gpr(dc, 15);

        tcg_gen_movi_tl(o7, dc->pc);
        gen_store_gpr(dc, 15, o7);

#ifdef TARGET_SPARC64
        if (unlikely(AM_CHECK(dc))) {
            target &= 0xffffffffULL;
        }
#endif
    }
    case 2: /* FPU & Logical Operations */
    {
        unsigned int xop = GET_FIELD(insn, 7, 12);
        TCGv cpu_dst = get_temp_tl(dc);

        if (xop == 0x3a) { /* generate trap */
            int cond = GET_FIELD(insn, 3, 6);
            TCGLabel *l1 = NULL;

            /* Conditional trap.  */
#ifdef TARGET_SPARC64
            int cc = GET_FIELD_SP(insn, 11, 12);
            if (cc == 0) {
                gen_compare(&cmp, 0, cond, dc);
            } else if (cc == 2) {
                gen_compare(&cmp, 1, cond, dc);
            }
#else
            gen_compare(&cmp, 0, cond, dc);
#endif
            l1 = gen_new_label();
            tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                              cmp.c1, cmp.c2, l1);

            mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                    ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
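
            /* Added note: the software trap number is masked to 7 bits
               before TT_TRAP is added, or to 8 bits when the UA2005
               hypervisor mask is selected in supervisor mode.  */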
            /* Don't use the normal temporaries, as they may well have
               gone out of scope with the branch above.  While we're
               doing that we might as well pre-truncate to 32-bit.  */
            trap = tcg_temp_new_i32();

            rs1 = GET_FIELD_SP(insn, 14, 18);
            if (IS_IMM) {
                rs2 = GET_FIELD_SP(insn, 0, 7);
                if (rs1 == 0) {
                    tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                    /* Signal that the trap value is fully constant.  */
                    mask = 0;
                } else {
                    TCGv t1 = gen_load_gpr(dc, rs1);
                    tcg_gen_trunc_tl_i32(trap, t1);
                    tcg_gen_addi_i32(trap, trap, rs2);
                }
            } else {
                rs2 = GET_FIELD_SP(insn, 0, 4);
                t1 = gen_load_gpr(dc, rs1);
                t2 = gen_load_gpr(dc, rs2);
                tcg_gen_add_tl(t1, t1, t2);
                tcg_gen_trunc_tl_i32(trap, t1);
            }
            if (mask != 0) {
                tcg_gen_andi_i32(trap, trap, mask);
                tcg_gen_addi_i32(trap, trap, TT_TRAP);
            }

            gen_helper_raise_exception(cpu_env, trap);
            tcg_temp_free_i32(trap);

            if (cond == 8) {
                /* An unconditional trap ends the TB.  */
                dc->base.is_jmp = DISAS_NORETURN;
            } else {
                /* A conditional trap falls through to the next insn.  */
                gen_set_label(l1);
            }
        } else if (xop == 0x28) {
            rs1 = GET_FIELD(insn, 13, 17);
            switch (rs1) {
#ifndef TARGET_SPARC64
            case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                   manual, rdy on the microSPARC II */
            case 0x0f:          /* stbar in the SPARCv8 manual,
                                   rdy on the microSPARC II */
            case 0x10 ... 0x1f: /* implementation-dependent in the
                                   SPARCv8 manual, rdy on the
                                   microSPARC II */
                if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                    TCGv t = gen_dest_gpr(dc, rd);
                    /* Read Asr17 for a Leon3 monoprocessor */
                    tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                    gen_store_gpr(dc, rd, t);
                    break;
                }
#endif
                gen_store_gpr(dc, rd, cpu_y);
                break;
#ifdef TARGET_SPARC64
            case 0x2: /* V9 rdccr */
                gen_helper_rdccr(cpu_dst, cpu_env);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x3: /* V9 rdasi */
                tcg_gen_movi_tl(cpu_dst, dc->asi);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x4: /* V9 rdtick */
                r_tickptr = tcg_temp_new_ptr();
                r_const = tcg_const_i32(dc->mem_idx);
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, tick));
                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                    gen_io_start();
                }
                gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                          r_const);
                tcg_temp_free_ptr(r_tickptr);
                tcg_temp_free_i32(r_const);
                gen_store_gpr(dc, rd, cpu_dst);
                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                    /* I/O operations in icount mode must end the TB */
                    dc->base.is_jmp = DISAS_EXIT;
                }
                break;
            case 0x5: /* V9 rdpc */
            {
                TCGv t = gen_dest_gpr(dc, rd);
                if (unlikely(AM_CHECK(dc))) {
                    tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                } else {
                    tcg_gen_movi_tl(t, dc->pc);
                }
                gen_store_gpr(dc, rd, t);
            }
                break;
            case 0x6: /* V9 rdfprs */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0xf: /* V9 membar */
                break; /* no effect */
            case 0x13: /* Graphics Status */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_store_gpr(dc, rd, cpu_gsr);
                break;
            case 0x16: /* Softint */
                tcg_gen_ld32s_tl(cpu_dst, cpu_env,
                                 offsetof(CPUSPARCState, softint));
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x17: /* Tick compare */
                gen_store_gpr(dc, rd, cpu_tick_cmpr);
                break;
            case 0x18: /* System tick */
                r_tickptr = tcg_temp_new_ptr();
                r_const = tcg_const_i32(dc->mem_idx);
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, stick));
                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                    gen_io_start();
                }
                gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                          r_const);
                tcg_temp_free_ptr(r_tickptr);
                tcg_temp_free_i32(r_const);
                gen_store_gpr(dc, rd, cpu_dst);
                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                    /* I/O operations in icount mode must end the TB */
                    dc->base.is_jmp = DISAS_EXIT;
                }
                break;
            case 0x19: /* System tick compare */
                gen_store_gpr(dc, rd, cpu_stick_cmpr);
                break;
            case 0x1a: /* UltraSPARC-T1 Strand status */
                /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
                 * this ASR as impl. dep
                 */
                CHECK_IU_FEATURE(dc, HYPV);
                {
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, 1UL);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x10: /* Performance Control */
            case 0x11: /* Performance Instrumentation Counter */
            case 0x12: /* Dispatch Control */
            case 0x14: /* Softint set, WO */
            case 0x15: /* Softint clear, WO */
#if !defined(CONFIG_USER_ONLY)
        } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
            if (!supervisor(dc)) {
                goto priv_insn;
            }
            gen_helper_rdpsr(cpu_dst, cpu_env);
#else
            CHECK_IU_FEATURE(dc, HYPV);
            if (!hypervisor(dc))
                goto priv_insn;
            rs1 = GET_FIELD(insn, 13, 17);

            tcg_gen_ld_i64(cpu_dst, cpu_env,
                           offsetof(CPUSPARCState, hpstate));

            // gen_op_rdhtstate();

            tcg_gen_mov_tl(cpu_dst, cpu_hintp);

            tcg_gen_mov_tl(cpu_dst, cpu_htba);

            tcg_gen_mov_tl(cpu_dst, cpu_hver);

            case 31: // hstick_cmpr
                tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
#endif
            gen_store_gpr(dc, rd, cpu_dst);
        } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
            if (!supervisor(dc)) {
                goto priv_insn;
            }
            cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
            rs1 = GET_FIELD(insn, 13, 17);

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                          offsetof(trap_state, tpc));
            tcg_temp_free_ptr(r_tsptr);

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                          offsetof(trap_state, tnpc));
            tcg_temp_free_ptr(r_tsptr);

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                          offsetof(trap_state, tstate));
            tcg_temp_free_ptr(r_tsptr);

            {
                TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                 offsetof(trap_state, tt));
                tcg_temp_free_ptr(r_tsptr);
            }

            r_tickptr = tcg_temp_new_ptr();
            r_const = tcg_const_i32(dc->mem_idx);
            tcg_gen_ld_ptr(r_tickptr, cpu_env,
                           offsetof(CPUSPARCState, tick));
            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_tick_get_count(cpu_tmp0, cpu_env,
                                      r_tickptr, r_const);
            tcg_temp_free_ptr(r_tickptr);
            tcg_temp_free_i32(r_const);
            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                /* I/O operations in icount mode must end the TB */
                dc->base.is_jmp = DISAS_EXIT;
            }

            tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);

            tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                             offsetof(CPUSPARCState, pstate));

            tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                             offsetof(CPUSPARCState, tl));

            tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                             offsetof(CPUSPARCState, psrpil));

            gen_helper_rdcwp(cpu_tmp0, cpu_env);

            tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                             offsetof(CPUSPARCState, cansave));

            case 11: // canrestore
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, canrestore));

            case 12: // cleanwin
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, cleanwin));

            case 13: // otherwin
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, otherwin));

            tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                             offsetof(CPUSPARCState, wstate));

            case 16: // UA2005 gl
                CHECK_IU_FEATURE(dc, GL);
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, gl));

            case 26: // UA2005 strand status
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);

            tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
#else
            tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
            gen_store_gpr(dc, rd, cpu_tmp0);
#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
        } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
            gen_helper_flushw(cpu_env);
#else
            if (!supervisor(dc))
                goto priv_insn;
            gen_store_gpr(dc, rd, cpu_tbr);
#endif
        } else if (xop == 0x34) {   /* FPU Operations */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_op_clear_ieee_excp_and_FTT();
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            xop = GET_FIELD(insn, 18, 26);

            case 0x1: /* fmovs */
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
            case 0x5: /* fnegs */
                gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
            case 0x9: /* fabss */
                gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
            case 0x29: /* fsqrts */
                CHECK_FPU_FEATURE(dc, FSQRT);
                gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
            case 0x2a: /* fsqrtd */
                CHECK_FPU_FEATURE(dc, FSQRT);
                gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
            case 0x2b: /* fsqrtq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
            case 0x41: /* fadds */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
            case 0x42: /* faddd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
            case 0x43: /* faddq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
            case 0x45: /* fsubs */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
            case 0x46: /* fsubd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
            case 0x47: /* fsubq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
            case 0x49: /* fmuls */
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
            case 0x4a: /* fmuld */
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
            case 0x4b: /* fmulq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
            case 0x4d: /* fdivs */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
            case 0x4e: /* fdivd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
            case 0x4f: /* fdivq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
            case 0x69: /* fsmuld */
                CHECK_FPU_FEATURE(dc, FSMULD);
                gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
            case 0x6e: /* fdmulq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
            case 0xc4: /* fitos */
                gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
            case 0xc6: /* fdtos */
                gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
            case 0xc7: /* fqtos */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
            case 0xc8: /* fitod */
                gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
            case 0xc9: /* fstod */
                gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
            case 0xcb: /* fqtod */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
            case 0xcc: /* fitoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
            case 0xcd: /* fstoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
            case 0xce: /* fdtoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
            case 0xd1: /* fstoi */
                gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
            case 0xd2: /* fdtoi */
                gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
            case 0xd3: /* fqtoi */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
#ifdef TARGET_SPARC64
            case 0x2: /* V9 fmovd */
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
            case 0x3: /* V9 fmovq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_move_Q(dc, rd, rs2);
            case 0x6: /* V9 fnegd */
                gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
            case 0x7: /* V9 fnegq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
            case 0xa: /* V9 fabsd */
                gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
            case 0xb: /* V9 fabsq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
            case 0x81: /* V9 fstox */
                gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
            case 0x82: /* V9 fdtox */
                gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
            case 0x83: /* V9 fqtox */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
            case 0x84: /* V9 fxtos */
                gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
            case 0x88: /* V9 fxtod */
                gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
            case 0x8c: /* V9 fxtoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);

        } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_op_clear_ieee_excp_and_FTT();
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
                cond = GET_FIELD_SP(insn, 10, 12);          \
                cpu_src1 = get_src1(dc, insn);              \
                gen_compare_reg(&cmp, cond, cpu_src1);      \
                gen_fmov##sz(dc, &cmp, rd, rs2);            \
                free_compare(&cmp);                         \

            if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */

            } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr

            } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                CHECK_FPU_FEATURE(dc, FLOAT128);

#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                     \
                cond = GET_FIELD_SP(insn, 14, 17);          \
                gen_fcompare(&cmp, fcc, cond);              \
                gen_fmov##sz(dc, &cmp, rd, rs2);            \
                free_compare(&cmp);                         \

            case 0x001: /* V9 fmovscc %fcc0 */
            case 0x002: /* V9 fmovdcc %fcc0 */
            case 0x003: /* V9 fmovqcc %fcc0 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x041: /* V9 fmovscc %fcc1 */
            case 0x042: /* V9 fmovdcc %fcc1 */
            case 0x043: /* V9 fmovqcc %fcc1 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x081: /* V9 fmovscc %fcc2 */
            case 0x082: /* V9 fmovdcc %fcc2 */
            case 0x083: /* V9 fmovqcc %fcc2 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x0c1: /* V9 fmovscc %fcc3 */
            case 0x0c2: /* V9 fmovdcc %fcc3 */
            case 0x0c3: /* V9 fmovqcc %fcc3 */
                CHECK_FPU_FEATURE(dc, FLOAT128);

#define FMOVCC(xcc, sz)                                     \
                cond = GET_FIELD_SP(insn, 14, 17);          \
                gen_compare(&cmp, xcc, cond, dc);           \
                gen_fmov##sz(dc, &cmp, rd, rs2);            \
                free_compare(&cmp);                         \

            case 0x101: /* V9 fmovscc %icc */
            case 0x102: /* V9 fmovdcc %icc */
            case 0x103: /* V9 fmovqcc %icc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x181: /* V9 fmovscc %xcc */
            case 0x182: /* V9 fmovdcc %xcc */
            case 0x183: /* V9 fmovqcc %xcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);

            case 0x51: /* fcmps, V9 %fcc */
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
            case 0x52: /* fcmpd, V9 %fcc */
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
            case 0x53: /* fcmpq, V9 %fcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpq(rd & 3);
            case 0x55: /* fcmpes, V9 %fcc */
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
            case 0x56: /* fcmped, V9 %fcc */
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
            case 0x57: /* fcmpeq, V9 %fcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpeq(rd & 3);
== 0x2) {
3985 TCGv dst
= gen_dest_gpr(dc
, rd
);
3986 rs1
= GET_FIELD(insn
, 13, 17);
3988 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3989 if (IS_IMM
) { /* immediate */
3990 simm
= GET_FIELDs(insn
, 19, 31);
3991 tcg_gen_movi_tl(dst
, simm
);
3992 gen_store_gpr(dc
, rd
, dst
);
3993 } else { /* register */
3994 rs2
= GET_FIELD(insn
, 27, 31);
3996 tcg_gen_movi_tl(dst
, 0);
3997 gen_store_gpr(dc
, rd
, dst
);
3999 cpu_src2
= gen_load_gpr(dc
, rs2
);
4000 gen_store_gpr(dc
, rd
, cpu_src2
);
4004 cpu_src1
= get_src1(dc
, insn
);
4005 if (IS_IMM
) { /* immediate */
4006 simm
= GET_FIELDs(insn
, 19, 31);
4007 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
4008 gen_store_gpr(dc
, rd
, dst
);
4009 } else { /* register */
4010 rs2
= GET_FIELD(insn
, 27, 31);
4012 /* mov shortcut: or x, %g0, y -> mov x, y */
4013 gen_store_gpr(dc
, rd
, cpu_src1
);
4015 cpu_src2
= gen_load_gpr(dc
, rs2
);
4016 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
4017 gen_store_gpr(dc
, rd
, dst
);
4021 #ifdef TARGET_SPARC64
4022 } else if (xop
== 0x25) { /* sll, V9 sllx */
4023 cpu_src1
= get_src1(dc
, insn
);
4024 if (IS_IMM
) { /* immediate */
4025 simm
= GET_FIELDs(insn
, 20, 31);
4026 if (insn
& (1 << 12)) {
4027 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4029 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
4031 } else { /* register */
4032 rs2
= GET_FIELD(insn
, 27, 31);
4033 cpu_src2
= gen_load_gpr(dc
, rs2
);
4034 cpu_tmp0
= get_temp_tl(dc
);
4035 if (insn
& (1 << 12)) {
4036 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4038 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4040 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4042 gen_store_gpr(dc
, rd
, cpu_dst
);
4043 } else if (xop
== 0x26) { /* srl, V9 srlx */
4044 cpu_src1
= get_src1(dc
, insn
);
4045 if (IS_IMM
) { /* immediate */
4046 simm
= GET_FIELDs(insn
, 20, 31);
4047 if (insn
& (1 << 12)) {
4048 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4050 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
4051 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
4053 } else { /* register */
4054 rs2
= GET_FIELD(insn
, 27, 31);
4055 cpu_src2
= gen_load_gpr(dc
, rs2
);
4056 cpu_tmp0
= get_temp_tl(dc
);
4057 if (insn
& (1 << 12)) {
4058 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4059 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4061 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4062 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
4063 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
4066 gen_store_gpr(dc
, rd
, cpu_dst
);
4067 } else if (xop
== 0x27) { /* sra, V9 srax */
4068 cpu_src1
= get_src1(dc
, insn
);
4069 if (IS_IMM
) { /* immediate */
4070 simm
= GET_FIELDs(insn
, 20, 31);
4071 if (insn
& (1 << 12)) {
4072 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4074 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
4075 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
4077 } else { /* register */
4078 rs2
= GET_FIELD(insn
, 27, 31);
4079 cpu_src2
= gen_load_gpr(dc
, rs2
);
4080 cpu_tmp0
= get_temp_tl(dc
);
4081 if (insn
& (1 << 12)) {
4082 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4083 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4085 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4086 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
4087 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
4090 gen_store_gpr(dc
, rd
, cpu_dst
);
        } else if (xop < 0x36) {
            cpu_src1 = get_src1(dc, insn);
            cpu_src2 = get_src2(dc, insn);
            switch (xop & ~0x10) {
            case 0x0: /* add */
                if (xop & 0x10) {
                    gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                    dc->cc_op = CC_OP_ADD;
                } else {
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                }
            case 0x1: /* and */
                tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0x2: /* or */
                tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0x3: /* xor */
                tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0x4: /* sub */
                if (xop & 0x10) {
                    gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                    dc->cc_op = CC_OP_SUB;
                } else {
                    tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                }
            case 0x5: /* andn */
                tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0x6: /* orn */
                tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0x7: /* xorn */
                tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0x8: /* addx, V9 addc */
                gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                (xop & 0x10));
#ifdef TARGET_SPARC64
            case 0x9: /* V9 mulx */
                tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
#endif
            case 0xa: /* umul */
                CHECK_IU_FEATURE(dc, MUL);
                gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0xb: /* smul */
                CHECK_IU_FEATURE(dc, MUL);
                gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                if (xop & 0x10) {
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                }
            case 0xc: /* subx, V9 subc */
                gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                (xop & 0x10));
#ifdef TARGET_SPARC64
            case 0xd: /* V9 udivx */
                gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
#endif
            case 0xe: /* udiv */
                CHECK_IU_FEATURE(dc, DIV);
                if (xop & 0x10) {
                    gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                       cpu_src2);
                    dc->cc_op = CC_OP_DIV;
                } else {
                    gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                    cpu_src2);
                }
            case 0xf: /* sdiv */
                CHECK_IU_FEATURE(dc, DIV);
                if (xop & 0x10) {
                    gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                       cpu_src2);
                    dc->cc_op = CC_OP_DIV;
                } else {
                    gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                    cpu_src2);
                }
            }
            gen_store_gpr(dc, rd, cpu_dst);

            cpu_src1 = get_src1(dc, insn);
            cpu_src2 = get_src2(dc, insn);
            switch (xop) {
            case 0x20: /* taddcc */
                gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                dc->cc_op = CC_OP_TADD;
            case 0x21: /* tsubcc */
                gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                dc->cc_op = CC_OP_TSUB;
            case 0x22: /* taddcctv */
                gen_helper_taddcctv(cpu_dst, cpu_env,
                                    cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                dc->cc_op = CC_OP_TADDTV;
            case 0x23: /* tsubcctv */
                gen_helper_tsubcctv(cpu_dst, cpu_env,
                                    cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                dc->cc_op = CC_OP_TSUBTV;
            case 0x24: /* mulscc */
                gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                dc->cc_op = CC_OP_ADD;
#ifndef TARGET_SPARC64
            case 0x25: /* sll */
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                } else { /* register */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x26: /* srl */
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                } else { /* register */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x27: /* sra */
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                } else { /* register */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);

                cpu_tmp0 = get_temp_tl(dc);

                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
#ifndef TARGET_SPARC64
            case 0x01 ... 0x0f: /* undefined in the
                                   SPARCv8 manual, nop
                                   on the microSPARC II */
            case 0x10 ... 0x1f: /* implementation-dependent
                                   in the SPARCv8 manual,
                                   nop on the microSPARC II */
                if ((rd == 0x13) && (dc->def->features &
                                     CPU_FEATURE_POWERDOWN)) {
                    /* LEON3 power-down */
                    gen_helper_power_down(cpu_env);
                }
#endif
            case 0x2: /* V9 wrccr */
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_wrccr(cpu_env, cpu_tmp0);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                dc->cc_op = CC_OP_FLAGS;
            case 0x3: /* V9 wrasi */
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                offsetof(CPUSPARCState, asi));
                /* End TB to notice changed ASI.  */
                tcg_gen_exit_tb(NULL, 0);
                dc->base.is_jmp = DISAS_NORETURN;
            case 0x6: /* V9 wrfprs */
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);

                tcg_gen_exit_tb(NULL, 0);
                dc->base.is_jmp = DISAS_NORETURN;
            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                if (supervisor(dc)) {
                }
#endif
            case 0x13: /* Graphics Status */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
            case 0x14: /* Softint set */
                if (!supervisor(dc))
                    goto illegal_insn;
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_set_softint(cpu_env, cpu_tmp0);
            case 0x15: /* Softint clear */
                if (!supervisor(dc))
                    goto illegal_insn;
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_clear_softint(cpu_env, cpu_tmp0);
            case 0x16: /* Softint write */
                if (!supervisor(dc))
                    goto illegal_insn;
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_write_softint(cpu_env, cpu_tmp0);
            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                if (!supervisor(dc))
                    goto illegal_insn;
#endif
                tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                               cpu_src2);
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, tick));
                if (tb_cflags(dc->base.tb) &
                    CF_USE_ICOUNT) {
                    gen_io_start();
                }
                gen_helper_tick_set_limit(r_tickptr,
                                          cpu_tick_cmpr);
                tcg_temp_free_ptr(r_tickptr);
                /* End TB to handle timer interrupt */
                dc->base.is_jmp = DISAS_EXIT;
            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                if (!supervisor(dc))
                    goto illegal_insn;
#endif
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                               cpu_src2);
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, stick));
                if (tb_cflags(dc->base.tb) &
                    CF_USE_ICOUNT) {
                    gen_io_start();
                }
                gen_helper_tick_set_count(r_tickptr,
                                          cpu_tmp0);
                tcg_temp_free_ptr(r_tickptr);
                /* End TB to handle timer interrupt */
                dc->base.is_jmp = DISAS_EXIT;
            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                if (!supervisor(dc))
                    goto illegal_insn;
#endif
                tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                               cpu_src2);
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, stick));
                if (tb_cflags(dc->base.tb) &
                    CF_USE_ICOUNT) {
                    gen_io_start();
                }
                gen_helper_tick_set_limit(r_tickptr,
                                          cpu_stick_cmpr);
                tcg_temp_free_ptr(r_tickptr);
                /* End TB to handle timer interrupt */
                dc->base.is_jmp = DISAS_EXIT;

            case 0x10: /* Performance Control */
            case 0x11: /* Performance Instrumentation Counter */
            case 0x12: /* Dispatch Control */
#if !defined(CONFIG_USER_ONLY)
        case 0x31: /* wrpsr, V9 saved, restored */
            if (!supervisor(dc))
                goto priv_insn;
#ifdef TARGET_SPARC64
            gen_helper_saved(cpu_env);

            gen_helper_restored(cpu_env);

            case 2: /* UA2005 allclean */
            case 3: /* UA2005 otherw */
            case 4: /* UA2005 normalw */
            case 5: /* UA2005 invalw */

#else
            cpu_tmp0 = get_temp_tl(dc);
            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
            gen_helper_wrpsr(cpu_env, cpu_tmp0);
            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
            dc->cc_op = CC_OP_FLAGS;

            tcg_gen_exit_tb(NULL, 0);
            dc->base.is_jmp = DISAS_NORETURN;
#endif
        case 0x32: /* wrwim, V9 wrpr */
            if (!supervisor(dc))
                goto priv_insn;
            cpu_tmp0 = get_temp_tl(dc);
            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                          offsetof(trap_state, tpc));
            tcg_temp_free_ptr(r_tsptr);

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                          offsetof(trap_state, tnpc));
            tcg_temp_free_ptr(r_tsptr);

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                          offsetof(trap_state,
                                   tstate));
            tcg_temp_free_ptr(r_tsptr);

            r_tsptr = tcg_temp_new_ptr();
            gen_load_trap_state_at_tl(r_tsptr, cpu_env);
            tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                            offsetof(trap_state, tt));
            tcg_temp_free_ptr(r_tsptr);

            r_tickptr = tcg_temp_new_ptr();
            tcg_gen_ld_ptr(r_tickptr, cpu_env,
                           offsetof(CPUSPARCState, tick));
            if (tb_cflags(dc->base.tb) &
                CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_tick_set_count(r_tickptr,
                                      cpu_tmp0);
            tcg_temp_free_ptr(r_tickptr);
            /* End TB to handle timer interrupt */
            dc->base.is_jmp = DISAS_EXIT;

            tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);

            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_wrpstate(cpu_env, cpu_tmp0);
            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                /* I/O ops in icount mode must end the TB */
                dc->base.is_jmp = DISAS_EXIT;
            }
            dc->npc = DYNAMIC_PC;

            tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                            offsetof(CPUSPARCState, tl));
            dc->npc = DYNAMIC_PC;

            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_wrpil(cpu_env, cpu_tmp0);
            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                /* I/O ops in icount mode must end the TB */
                dc->base.is_jmp = DISAS_EXIT;
            }

            gen_helper_wrcwp(cpu_env, cpu_tmp0);

            tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                            offsetof(CPUSPARCState,
                                     cansave));

            case 11: // canrestore
                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                offsetof(CPUSPARCState,
                                         canrestore));

            case 12: // cleanwin
                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                offsetof(CPUSPARCState,
                                         cleanwin));

            case 13: // otherwin
                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                offsetof(CPUSPARCState,
                                         otherwin));

            tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                            offsetof(CPUSPARCState,
                                     wstate));

            case 16: // UA2005 gl
                CHECK_IU_FEATURE(dc, GL);
                gen_helper_wrgl(cpu_env, cpu_tmp0);

            case 26: // UA2005 strand status
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
#else
            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
            if (dc->def->nwindows != 32) {
                tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                (1 << dc->def->nwindows) - 1);
            }
#endif
        case 0x33: /* wrtbr, UA2005 wrhpr */
#ifndef TARGET_SPARC64
            if (!supervisor(dc))
                goto priv_insn;
            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
            CHECK_IU_FEATURE(dc, HYPV);
            if (!hypervisor(dc))
                goto priv_insn;
            cpu_tmp0 = get_temp_tl(dc);
            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);

            tcg_gen_st_i64(cpu_tmp0, cpu_env,
                           offsetof(CPUSPARCState,
                                    hpstate));

            tcg_gen_exit_tb(NULL, 0);
            dc->base.is_jmp = DISAS_NORETURN;

            // XXX gen_op_wrhtstate();

            tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);

            tcg_gen_mov_tl(cpu_htba, cpu_tmp0);

            case 31: // hstick_cmpr
                tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, hstick));
                if (tb_cflags(dc->base.tb) &
                    CF_USE_ICOUNT) {
                    gen_io_start();
                }
                gen_helper_tick_set_limit(r_tickptr,
                                          cpu_hstick_cmpr);
                tcg_temp_free_ptr(r_tickptr);
                /* End TB to handle timer interrupt */
                dc->base.is_jmp = DISAS_EXIT;

            case 6: // hver readonly
#ifdef TARGET_SPARC64
        case 0x2c: /* V9 movcc */
        {
            int cc = GET_FIELD_SP(insn, 11, 12);
            int cond = GET_FIELD_SP(insn, 14, 17);

            if (insn & (1 << 18)) {
                if (cc == 0) {
                    gen_compare(&cmp, 0, cond, dc);
                } else if (cc == 2) {
                    gen_compare(&cmp, 1, cond, dc);
                }
            } else {
                gen_fcompare(&cmp, cc, cond);
            }

            /* The get_src2 above loaded the normal 13-bit
               immediate field, not the 11-bit field we have
               in movcc.  But it did handle the reg case.  */
            if (IS_IMM) {
                simm = GET_FIELD_SPs(insn, 0, 10);
                tcg_gen_movi_tl(cpu_src2, simm);
            }

            dst = gen_load_gpr(dc, rd);
            tcg_gen_movcond_tl(cmp.cond, dst,
                               cmp.c1, cmp.c2,
                               cpu_src2, dst);
            gen_store_gpr(dc, rd, dst);
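
            /* Added note: GET_FIELD_SPs(insn, 0, 10) sign-extends the 11-bit
               movcc immediate, so the reloaded cpu_src2 covers the range
               -1024..1023 rather than the simm13 range loaded earlier.  */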
        case 0x2d: /* V9 sdivx */
            gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
            gen_store_gpr(dc, rd, cpu_dst);
        case 0x2e: /* V9 popc */
            tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
            gen_store_gpr(dc, rd, cpu_dst);
        case 0x2f: /* V9 movr */
        {
            int cond = GET_FIELD_SP(insn, 10, 12);

            gen_compare_reg(&cmp, cond, cpu_src1);

            /* The get_src2 above loaded the normal 13-bit
               immediate field, not the 10-bit field we have
               in movr.  But it did handle the reg case.  */
            simm = GET_FIELD_SPs(insn, 0, 9);
            tcg_gen_movi_tl(cpu_src2, simm);

            dst = gen_load_gpr(dc, rd);
            tcg_gen_movcond_tl(cmp.cond, dst,
                               cmp.c1, cmp.c2,
                               cpu_src2, dst);
            gen_store_gpr(dc, rd, dst);
4777 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4778 #ifdef TARGET_SPARC64
4779 int opf
= GET_FIELD_SP(insn
, 5, 13);
4780 rs1
= GET_FIELD(insn
, 13, 17);
4781 rs2
= GET_FIELD(insn
, 27, 31);
4782 if (gen_trap_ifnofpu(dc
)) {
4787 case 0x000: /* VIS I edge8cc */
4788 CHECK_FPU_FEATURE(dc
, VIS1
);
4789 cpu_src1
= gen_load_gpr(dc
, rs1
);
4790 cpu_src2
= gen_load_gpr(dc
, rs2
);
4791 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4792 gen_store_gpr(dc
, rd
, cpu_dst
);
4794 case 0x001: /* VIS II edge8n */
4795 CHECK_FPU_FEATURE(dc
, VIS2
);
4796 cpu_src1
= gen_load_gpr(dc
, rs1
);
4797 cpu_src2
= gen_load_gpr(dc
, rs2
);
4798 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4799 gen_store_gpr(dc
, rd
, cpu_dst
);
4801 case 0x002: /* VIS I edge8lcc */
4802 CHECK_FPU_FEATURE(dc
, VIS1
);
4803 cpu_src1
= gen_load_gpr(dc
, rs1
);
4804 cpu_src2
= gen_load_gpr(dc
, rs2
);
4805 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4806 gen_store_gpr(dc
, rd
, cpu_dst
);
4808 case 0x003: /* VIS II edge8ln */
4809 CHECK_FPU_FEATURE(dc
, VIS2
);
4810 cpu_src1
= gen_load_gpr(dc
, rs1
);
4811 cpu_src2
= gen_load_gpr(dc
, rs2
);
4812 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4813 gen_store_gpr(dc
, rd
, cpu_dst
);
4815 case 0x004: /* VIS I edge16cc */
4816 CHECK_FPU_FEATURE(dc
, VIS1
);
4817 cpu_src1
= gen_load_gpr(dc
, rs1
);
4818 cpu_src2
= gen_load_gpr(dc
, rs2
);
4819 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4820 gen_store_gpr(dc
, rd
, cpu_dst
);
4822 case 0x005: /* VIS II edge16n */
4823 CHECK_FPU_FEATURE(dc
, VIS2
);
4824 cpu_src1
= gen_load_gpr(dc
, rs1
);
4825 cpu_src2
= gen_load_gpr(dc
, rs2
);
4826 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4827 gen_store_gpr(dc
, rd
, cpu_dst
);
4829 case 0x006: /* VIS I edge16lcc */
4830 CHECK_FPU_FEATURE(dc
, VIS1
);
4831 cpu_src1
= gen_load_gpr(dc
, rs1
);
4832 cpu_src2
= gen_load_gpr(dc
, rs2
);
4833 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4834 gen_store_gpr(dc
, rd
, cpu_dst
);
4836 case 0x007: /* VIS II edge16ln */
4837 CHECK_FPU_FEATURE(dc
, VIS2
);
4838 cpu_src1
= gen_load_gpr(dc
, rs1
);
4839 cpu_src2
= gen_load_gpr(dc
, rs2
);
4840 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4841 gen_store_gpr(dc
, rd
, cpu_dst
);
4843 case 0x008: /* VIS I edge32cc */
4844 CHECK_FPU_FEATURE(dc
, VIS1
);
4845 cpu_src1
= gen_load_gpr(dc
, rs1
);
4846 cpu_src2
= gen_load_gpr(dc
, rs2
);
4847 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4848 gen_store_gpr(dc
, rd
, cpu_dst
);
4850 case 0x009: /* VIS II edge32n */
4851 CHECK_FPU_FEATURE(dc
, VIS2
);
4852 cpu_src1
= gen_load_gpr(dc
, rs1
);
4853 cpu_src2
= gen_load_gpr(dc
, rs2
);
4854 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4855 gen_store_gpr(dc
, rd
, cpu_dst
);
4857 case 0x00a: /* VIS I edge32lcc */
4858 CHECK_FPU_FEATURE(dc
, VIS1
);
4859 cpu_src1
= gen_load_gpr(dc
, rs1
);
4860 cpu_src2
= gen_load_gpr(dc
, rs2
);
4861 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4862 gen_store_gpr(dc
, rd
, cpu_dst
);
4864 case 0x00b: /* VIS II edge32ln */
4865 CHECK_FPU_FEATURE(dc
, VIS2
);
4866 cpu_src1
= gen_load_gpr(dc
, rs1
);
4867 cpu_src2
= gen_load_gpr(dc
, rs2
);
4868 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4869 gen_store_gpr(dc
, rd
, cpu_dst
);
4871 case 0x010: /* VIS I array8 */
4872 CHECK_FPU_FEATURE(dc
, VIS1
);
4873 cpu_src1
= gen_load_gpr(dc
, rs1
);
4874 cpu_src2
= gen_load_gpr(dc
, rs2
);
4875 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4876 gen_store_gpr(dc
, rd
, cpu_dst
);
4878 case 0x012: /* VIS I array16 */
4879 CHECK_FPU_FEATURE(dc
, VIS1
);
4880 cpu_src1
= gen_load_gpr(dc
, rs1
);
4881 cpu_src2
= gen_load_gpr(dc
, rs2
);
4882 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4883 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4884 gen_store_gpr(dc
, rd
, cpu_dst
);
4886 case 0x014: /* VIS I array32 */
4887 CHECK_FPU_FEATURE(dc
, VIS1
);
4888 cpu_src1
= gen_load_gpr(dc
, rs1
);
4889 cpu_src2
= gen_load_gpr(dc
, rs2
);
4890 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4891 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4892 gen_store_gpr(dc
, rd
, cpu_dst
);
4894 case 0x018: /* VIS I alignaddr */
4895 CHECK_FPU_FEATURE(dc
, VIS1
);
4896 cpu_src1
= gen_load_gpr(dc
, rs1
);
4897 cpu_src2
= gen_load_gpr(dc
, rs2
);
4898 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4899 gen_store_gpr(dc
, rd
, cpu_dst
);
4901 case 0x01a: /* VIS I alignaddrl */
4902 CHECK_FPU_FEATURE(dc
, VIS1
);
4903 cpu_src1
= gen_load_gpr(dc
, rs1
);
4904 cpu_src2
= gen_load_gpr(dc
, rs2
);
4905 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4906 gen_store_gpr(dc
, rd
, cpu_dst
);
4908 case 0x019: /* VIS II bmask */
4909 CHECK_FPU_FEATURE(dc
, VIS2
);
4910 cpu_src1
= gen_load_gpr(dc
, rs1
);
4911 cpu_src2
= gen_load_gpr(dc
, rs2
);
4912 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4913 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4914 gen_store_gpr(dc
, rd
, cpu_dst
);
4916 case 0x020: /* VIS I fcmple16 */
4917 CHECK_FPU_FEATURE(dc
, VIS1
);
4918 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4919 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4920 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4921 gen_store_gpr(dc
, rd
, cpu_dst
);
4923 case 0x022: /* VIS I fcmpne16 */
4924 CHECK_FPU_FEATURE(dc
, VIS1
);
4925 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4926 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4927 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4928 gen_store_gpr(dc
, rd
, cpu_dst
);
4930 case 0x024: /* VIS I fcmple32 */
4931 CHECK_FPU_FEATURE(dc
, VIS1
);
4932 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4933 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4934 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4935 gen_store_gpr(dc
, rd
, cpu_dst
);
4937 case 0x026: /* VIS I fcmpne32 */
4938 CHECK_FPU_FEATURE(dc
, VIS1
);
4939 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4940 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4941 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4942 gen_store_gpr(dc
, rd
, cpu_dst
);
4944 case 0x028: /* VIS I fcmpgt16 */
4945 CHECK_FPU_FEATURE(dc
, VIS1
);
4946 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4947 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4948 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4949 gen_store_gpr(dc
, rd
, cpu_dst
);
4951 case 0x02a: /* VIS I fcmpeq16 */
4952 CHECK_FPU_FEATURE(dc
, VIS1
);
4953 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4954 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4955 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4956 gen_store_gpr(dc
, rd
, cpu_dst
);
4958 case 0x02c: /* VIS I fcmpgt32 */
4959 CHECK_FPU_FEATURE(dc
, VIS1
);
4960 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4961 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4962 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4963 gen_store_gpr(dc
, rd
, cpu_dst
);
4965 case 0x02e: /* VIS I fcmpeq32 */
4966 CHECK_FPU_FEATURE(dc
, VIS1
);
4967 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4968 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4969 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4970 gen_store_gpr(dc
, rd
, cpu_dst
);
4972 case 0x031: /* VIS I fmul8x16 */
4973 CHECK_FPU_FEATURE(dc
, VIS1
);
4974 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4976 case 0x033: /* VIS I fmul8x16au */
4977 CHECK_FPU_FEATURE(dc
, VIS1
);
4978 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4980 case 0x035: /* VIS I fmul8x16al */
4981 CHECK_FPU_FEATURE(dc
, VIS1
);
4982 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4984 case 0x036: /* VIS I fmul8sux16 */
4985 CHECK_FPU_FEATURE(dc
, VIS1
);
4986 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4988 case 0x037: /* VIS I fmul8ulx16 */
4989 CHECK_FPU_FEATURE(dc
, VIS1
);
4990 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4992 case 0x038: /* VIS I fmuld8sux16 */
4993 CHECK_FPU_FEATURE(dc
, VIS1
);
4994 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4996 case 0x039: /* VIS I fmuld8ulx16 */
4997 CHECK_FPU_FEATURE(dc
, VIS1
);
4998 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
5000 case 0x03a: /* VIS I fpack32 */
5001 CHECK_FPU_FEATURE(dc
, VIS1
);
5002 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
5004 case 0x03b: /* VIS I fpack16 */
5005 CHECK_FPU_FEATURE(dc
, VIS1
);
5006 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5007 cpu_dst_32
= gen_dest_fpr_F(dc
);
5008 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5009 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5011 case 0x03d: /* VIS I fpackfix */
5012 CHECK_FPU_FEATURE(dc
, VIS1
);
5013 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5014 cpu_dst_32
= gen_dest_fpr_F(dc
);
5015 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5016 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
            case 0x03e: /* VIS I pdist */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                break;
            case 0x048: /* VIS I faligndata */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                break;
            case 0x04b: /* VIS I fpmerge */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                break;
            case 0x04c: /* VIS II bshuffle */
                CHECK_FPU_FEATURE(dc, VIS2);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                break;
            case 0x04d: /* VIS I fexpand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                break;
            case 0x050: /* VIS I fpadd16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                break;
            case 0x051: /* VIS I fpadd16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                break;
            case 0x052: /* VIS I fpadd32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                break;
            case 0x053: /* VIS I fpadd32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                break;
            case 0x054: /* VIS I fpsub16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                break;
            case 0x055: /* VIS I fpsub16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                break;
            case 0x056: /* VIS I fpsub32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                break;
            case 0x057: /* VIS I fpsub32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                break;
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                break;
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                break;
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                break;
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                break;
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                break;
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                break;
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                break;
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                break;
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                break;
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                break;
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                break;
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                break;
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                break;
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                break;
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                break;
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                break;
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                break;
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                break;
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                break;
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                break;
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                break;
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                break;
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                break;
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                break;
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
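            /*
             * The VIS logical ops above map directly onto TCG i64/i32
             * operations (and, or, xor, nor, nand, eqv, andc, orc).  The
             * "*1" variants (fandnot1, fornot1, fnot1, ...) operate on or
             * complement rs1 rather than rs2, which is why they pass the
             * source registers to the generator in swapped order compared
             * with the corresponding "*2" variants.
             */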
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
                /* XXX */
                break;
            default:
                goto illegal_insn;
            }
        } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
            goto illegal_insn;
#else
            goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            gen_helper_restore(cpu_env);
            gen_check_align(cpu_tmp0, 3);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC;
            goto jmp_insn;
#endif
        } else {
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            switch (xop) {
            case 0x38:  /* jmpl */
                {
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, dc->pc);
                    gen_store_gpr(dc, rd, t);

                    gen_check_align(cpu_tmp0, 3);
                    gen_address_mask(dc, cpu_tmp0);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                }
                goto jmp_insn;
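            /*
             * Both 'return' and 'jmpl' are delayed control transfers: the
             * computed target is written to cpu_npc at run time and dc->npc
             * is set to DYNAMIC_PC, so the translator stops assuming a
             * statically known next PC for the rest of this block.
             */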
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:  /* rett, V9 return */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                gen_check_align(cpu_tmp0, 3);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                gen_helper_rett(cpu_env);
                goto jmp_insn;
#endif
            case 0x3b:  /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH)) {
                    goto unimp_flush;
                }
                break;
            case 0x3c:  /* save */
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d:  /* restore */
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
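            /*
             * save/restore rotate the register window through the helpers
             * and then write the computed sum (cpu_tmp0) to rd, which after
             * the window change names a register in the new window.
             */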
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:  /* V9 done/retry */
                switch (rd) {
                case 0:
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
                    dc->npc = DYNAMIC_PC;
                    dc->pc = DYNAMIC_PC;
                    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_done(cpu_env);
                    break;
                case 1:
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
                    dc->npc = DYNAMIC_PC;
                    dc->pc = DYNAMIC_PC;
                    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_retry(cpu_env);
                    break;
                default:
                    goto illegal_insn;
                }
                break;
#endif
            default:
                goto illegal_insn;
            }
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {    /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
            } else {                /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
            }
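            /*
             * At this point cpu_addr holds the effective address:
             * rs1 alone for casa/casxa, rs1 plus the sign-extended 13-bit
             * immediate, or rs1 plus rs2, depending on the encoding.
             */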
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0:   /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:   /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:   /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:   /* ldd, load double word */
                    {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
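                    /*
                     * Note on the ldd case above: a single aligned 64-bit
                     * load is split across the even/odd register pair.  The
                     * low 32 bits are written to rd + 1, then the high 32
                     * bits are left in cpu_val so the common
                     * gen_store_gpr(dc, rd, cpu_val) after this switch
                     * writes them to rd.
                     */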
                case 0x9:   /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:   /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:   /* ldstub */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:  /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11:  /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12:  /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13:  /* ldda, load double word alternate */
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:  /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a:  /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d:  /* ldstuba -- XXX: should be atomically */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:  /* swapa, swap reg with alt. memory. Also
                               atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20:  /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:  /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ);
                        gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                    break;
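                /*
                 * The FSR loads above go through helpers rather than a plain
                 * move into cpu_fsr so that side effects encoded in the FSR
                 * (rounding mode, trap enable bits) are also applied to the
                 * emulated FPU state.
                 */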
                case 0x22:  /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    tcg_temp_free_i64(cpu_src1_64);
                    tcg_temp_free_i64(cpu_src2_64);
                    break;
                case 0x23:  /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
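                    /*
                     * std above mirrors ldd: rd + 1 (low word) and rd (high
                     * word) are concatenated into one 64-bit value and
                     * written with a single aligned store.
                     */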
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
                        break;
                    }
#endif
                    tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write.  */
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    goto egress;
 unimp_flush:
    gen_exception(dc, TT_UNIMP_FLUSH);
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    goto egress;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;

        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;

        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->npc & JUMP_PC) {
        assert(dc->jump_pc[1] == dc->pc + 4);
        tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
    } else {
        tcg_gen_insn_start(dc->pc, dc->npc);
    }
}
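/*
 * Each insn start records two slots, pc and npc, which
 * sparc_restore_state_to_opc() below consumes.  When the next PC is still
 * conditional (JUMP_PC), the taken target is stored with the JUMP_PC tag in
 * its low bits so the condition can be resolved at restore time.
 */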
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;
    disas_sparc_insn(dc, insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }

    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
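/*
 * Note: cpu_regs[0] is intentionally never created above; %g0 reads as
 * constant zero and is special-cased by the gpr load/store helpers earlier
 * in this file.  Globals %g1-%g7 live directly in CPUSPARCState, while the
 * windowed registers (%o, %l, %i) are reached indirectly through
 * cpu_regwptr, which tracks the current register window.
 */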
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        env->npc = npc & ~3;