/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
28 #include "disas/disas.h"
29 #include "exec/helper-proto.h"
31 #include "exec/cpu_ldst.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
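
/*
 * Worked example (illustrative): sign_extend(0x1fff, 13) computes
 * (0x1fff << 19) >> 19 using an arithmetic shift, which yields -1.
 * GET_FIELDs/GET_FIELD_SPs use this to widen signed immediates such
 * as the 13-bit simm13 field.
 */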
#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
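
/*
 * Layout assumed by the accessors above and below: each cpu_fpr[i]
 * packs a pair of 32-bit singles, the even-numbered register in the
 * high 32 bits and the odd-numbered one in the low 32 bits.  That is
 * why an odd src can alias the i64 value directly while an even src
 * needs the shift by 32.
 */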
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
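
/*
 * Access pattern implemented above: only %g1..%g7 are backed by fixed
 * TCG globals (cpu_gregs[]).  %g0 reads as constant zero and stores to
 * it land in a dead temporary, while window registers (reg >= 8) are
 * loaded and stored through cpu_regwptr, which tracks the current
 * register window.
 */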
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
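
/*
 * Note on TB chaining: exiting with (uintptr_t)tb + tb_num tells the
 * execution loop which of the two goto_tb slots of this TB to patch so
 * the next execution jumps directly to the successor; exiting with 0
 * (the cross-page path) leaves the TBs unchained.
 */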
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
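
/*
 * Both helpers above rely on the same identity: after a 32-bit addition
 * dst = a + b, carry-out is exactly (uint32_t)dst < (uint32_t)a, and
 * after a subtraction dst = a - b, borrow is (uint32_t)a < (uint32_t)b.
 * A single unsigned setcond therefore recovers the flag with no help
 * from the host's condition codes.
 */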
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
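
/*
 * Note: gen_op_mulscc above models one step of the V8 multiply-step
 * instruction.  In effect, src2 participates in the add only when the
 * low bit of %y is set (the movcond zeroes cpu_cc_src2 otherwise), %y
 * shifts right by one with the low bit of src1 entering at its top,
 * and the running sum shifts right with (N ^ V) entering at bit 31.
 * Thirty-two such steps plus a final add are how V8 software sequences
 * expand a full 32x32 multiply.
 */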
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
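
/*
 * V8 semantics reminder for the two wrappers above: UMUL/SMUL write the
 * upper 32 bits of the full 64-bit product into %y (the shri by 32 in
 * gen_op_multiply), while the destination register receives the low
 * word on 32-bit targets and the whole 64-bit value on sparc64.
 */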
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// unordered or low or greater: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    /* flush pending conditional evaluations before exposing cpu state */
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
    save_npc(dc);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        TCG_COND_LT,   /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        TCG_COND_GE,   /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
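
/*
 * Usage sketch (hypothetical caller) for the DisasCompare interface:
 * a conditional move can consume the comparison without first
 * materializing a boolean, e.g.
 *
 *     DisasCompare cmp;
 *     gen_compare(&cmp, 0, cond, dc);
 *     tcg_gen_movcond_tl(cmp.cond, dst, cmp.c1, cmp.c2, t_val, f_val);
 *     free_compare(&cmp);
 *
 * gen_cond/gen_fcond above are the boolean-producing wrappers used by
 * the branch code.
 */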
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif
/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
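
/*
 * Note on ASI selection above: for V9 alternate-space accesses the i bit
 * (IS_IMM) chooses the source of the 8-bit address space identifier --
 * the %asi register for the "[addr] %asi" form, otherwise the immediate
 * ASI field in instruction bits 12..5 (GET_FIELD(insn, 19, 26)).
 */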
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
#endif
#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
#ifdef TARGET_SPARC64
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;

    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;

    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;

    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
        break;
    case 1:                     /* call */
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);

            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
2599 case 2: /* FPU & Logical Operations */
2601 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2602 TCGv cpu_dst
= get_temp_tl(dc
);
2605 if (xop
== 0x3a) { /* generate trap */
2606 int cond
= GET_FIELD(insn
, 3, 6);
2618 /* Conditional trap. */
2620 #ifdef TARGET_SPARC64
2622 int cc
= GET_FIELD_SP(insn
, 11, 12);
2624 gen_compare(&cmp
, 0, cond
, dc
);
2625 } else if (cc
== 2) {
2626 gen_compare(&cmp
, 1, cond
, dc
);
2631 gen_compare(&cmp
, 0, cond
, dc
);
2633 l1
= gen_new_label();
2634 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2635 cmp
.c1
, cmp
.c2
, l1
);
2639 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2640 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2642 /* Don't use the normal temporaries, as they may well have
2643 gone out of scope with the branch above. While we're
2644 doing that we might as well pre-truncate to 32-bit. */
2645 trap
= tcg_temp_new_i32();
2647 rs1
= GET_FIELD_SP(insn
, 14, 18);
2649 rs2
= GET_FIELD_SP(insn
, 0, 6);
2651 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2652 /* Signal that the trap value is fully constant. */
2655 TCGv t1
= gen_load_gpr(dc
, rs1
);
2656 tcg_gen_trunc_tl_i32(trap
, t1
);
2657 tcg_gen_addi_i32(trap
, trap
, rs2
);
2661 rs2
= GET_FIELD_SP(insn
, 0, 4);
2662 t1
= gen_load_gpr(dc
, rs1
);
2663 t2
= gen_load_gpr(dc
, rs2
);
2664 tcg_gen_add_tl(t1
, t1
, t2
);
2665 tcg_gen_trunc_tl_i32(trap
, t1
);
2668 tcg_gen_andi_i32(trap
, trap
, mask
);
2669 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2672 gen_helper_raise_exception(cpu_env
, trap
);
2673 tcg_temp_free_i32(trap
);
2676 /* An unconditional trap ends the TB. */
2680 /* A conditional trap falls through to the next insn. */
        } else if (xop == 0x28) {
            rs1 = GET_FIELD(insn, 13, 17);
            switch (rs1) {
            case 0: /* rdy */
#ifndef TARGET_SPARC64
            case 0x01 ... 0x0e: /* undefined in the SPARCv8 manual,
                                   rdy on the microSPARC II */
            case 0x0f:          /* stbar in the SPARCv8 manual,
                                   rdy on the microSPARC II */
            case 0x10 ... 0x1f: /* implementation-dependent in the
                                   SPARCv8 manual, rdy on the
                                   microSPARC II */
                /* Read Asr17 */
                if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                    TCGv t = gen_dest_gpr(dc, rd);
                    /* Read Asr17 for a Leon3 monoprocessor */
                    tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                    gen_store_gpr(dc, rd, t);
                    break;
                }
#endif
                gen_store_gpr(dc, rd, cpu_y);
                break;
#ifdef TARGET_SPARC64
            case 0x2: /* V9 rdccr */
                update_psr(dc);
                gen_helper_rdccr(cpu_dst, cpu_env);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x3: /* V9 rdasi */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x4: /* V9 rdtick */
            {
                TCGv_ptr r_tickptr;

                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, tick));
                gen_helper_tick_get_count(cpu_dst, r_tickptr);
                tcg_temp_free_ptr(r_tickptr);
                gen_store_gpr(dc, rd, cpu_dst);
            }
                break;
            case 0x5: /* V9 rdpc */
            {
                TCGv t = gen_dest_gpr(dc, rd);
                if (unlikely(AM_CHECK(dc))) {
                    tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                } else {
                    tcg_gen_movi_tl(t, dc->pc);
                }
                gen_store_gpr(dc, rd, t);
            }
                break;
            case 0x6: /* V9 rdfprs */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0xf: /* V9 membar */
                break; /* no effect */
            case 0x13: /* Graphics Status */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_store_gpr(dc, rd, cpu_gsr);
                break;
            case 0x16: /* Softint */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x17: /* Tick compare */
                gen_store_gpr(dc, rd, cpu_tick_cmpr);
                break;
            case 0x18: /* System tick */
            {
                TCGv_ptr r_tickptr;

                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, stick));
                gen_helper_tick_get_count(cpu_dst, r_tickptr);
                tcg_temp_free_ptr(r_tickptr);
                gen_store_gpr(dc, rd, cpu_dst);
            }
                break;
            case 0x19: /* System tick compare */
                gen_store_gpr(dc, rd, cpu_stick_cmpr);
                break;
            case 0x10: /* Performance Control */
            case 0x11: /* Performance Instrumentation Counter */
            case 0x12: /* Dispatch Control */
            case 0x14: /* Softint set, WO */
            case 0x15: /* Softint clear, WO */
#endif
            default:
                goto illegal_insn;
            }
#if !defined(CONFIG_USER_ONLY)
        } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
            if (!supervisor(dc)) {
                goto priv_insn;
            }
            update_psr(dc);
            gen_helper_rdpsr(cpu_dst, cpu_env);
#else
            CHECK_IU_FEATURE(dc, HYPV);
            if (!hypervisor(dc))
                goto priv_insn;
            rs1 = GET_FIELD(insn, 13, 17);
            switch (rs1) {
            case 0: // hpstate
                // gen_op_rdhpstate();
                break;
            case 1: // htstate
                // gen_op_rdhtstate();
                break;
            case 3: // hintp
                tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                break;
            case 5: // htba
                tcg_gen_mov_tl(cpu_dst, cpu_htba);
                break;
            case 6: // hver
                tcg_gen_mov_tl(cpu_dst, cpu_hver);
                break;
            case 31: // hstick_cmpr
                tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                break;
            default:
                goto illegal_insn;
            }
#endif
            gen_store_gpr(dc, rd, cpu_dst);
            break;
        } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
            if (!supervisor(dc)) {
                goto priv_insn;
            }
            cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
            rs1 = GET_FIELD(insn, 13, 17);
            switch (rs1) {
            case 0: // tpc
            {
                TCGv_ptr r_tsptr;

                r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                              offsetof(trap_state, tpc));
                tcg_temp_free_ptr(r_tsptr);
            }
                break;
            case 1: // tnpc
            {
                TCGv_ptr r_tsptr;

                r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                              offsetof(trap_state, tnpc));
                tcg_temp_free_ptr(r_tsptr);
            }
                break;
            case 2: // tstate
            {
                TCGv_ptr r_tsptr;

                r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                              offsetof(trap_state, tstate));
                tcg_temp_free_ptr(r_tsptr);
            }
                break;
            case 3: // tt
            {
                TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                 offsetof(trap_state, tt));
                tcg_temp_free_ptr(r_tsptr);
            }
                break;
            case 4: // tick
            {
                TCGv_ptr r_tickptr;

                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, tick));
                gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
                tcg_temp_free_ptr(r_tickptr);
            }
                break;
            case 5: // tba
                tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                break;
            case 6: // pstate
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, pstate));
                break;
            case 7: // tl
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, tl));
                break;
            case 8: // pil
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, psrpil));
                break;
            case 9: // cwp
                gen_helper_rdcwp(cpu_tmp0, cpu_env);
                break;
            case 10: // cansave
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, cansave));
                break;
            case 11: // canrestore
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, canrestore));
                break;
            case 12: // cleanwin
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, cleanwin));
                break;
            case 13: // otherwin
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, otherwin));
                break;
            case 14: // wstate
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, wstate));
                break;
            case 16: // UA2005 gl
                CHECK_IU_FEATURE(dc, GL);
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, gl));
                break;
            case 26: // UA2005 strand status
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                break;
            case 31: // ver
                tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                break;
            case 15: // fq
            default:
                goto illegal_insn;
            }
#else
            tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
            gen_store_gpr(dc, rd, cpu_tmp0);
            break;
        } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
            save_state(dc);
            gen_helper_flushw(cpu_env);
#else
            if (!supervisor(dc))
                goto priv_insn;
            gen_store_gpr(dc, rd, cpu_tbr);
#endif
            break;
#endif
        } else if (xop == 0x34) { /* FPU Operations */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_op_clear_ieee_excp_and_FTT();
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            xop = GET_FIELD(insn, 18, 26);
            save_state(dc);
            switch (xop) {
            case 0x1: /* fmovs */
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x5: /* fnegs */
                gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                break;
            case 0x9: /* fabss */
                gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                break;
            case 0x29: /* fsqrts */
                CHECK_FPU_FEATURE(dc, FSQRT);
                gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                break;
            case 0x2a: /* fsqrtd */
                CHECK_FPU_FEATURE(dc, FSQRT);
                gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                break;
            case 0x2b: /* fsqrtq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                break;
            case 0x41: /* fadds */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                break;
            case 0x42: /* faddd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                break;
            case 0x43: /* faddq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                break;
            case 0x45: /* fsubs */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                break;
            case 0x46: /* fsubd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                break;
            case 0x47: /* fsubq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                break;
            case 0x49: /* fmuls */
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                break;
            case 0x4a: /* fmuld */
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                break;
            case 0x4b: /* fmulq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                break;
            case 0x4d: /* fdivs */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                break;
            case 0x4e: /* fdivd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                break;
            case 0x4f: /* fdivq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                break;
            case 0x69: /* fsmuld */
                CHECK_FPU_FEATURE(dc, FSMULD);
                gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                break;
            case 0x6e: /* fdmulq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                break;
            case 0xc4: /* fitos */
                gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                break;
            case 0xc6: /* fdtos */
                gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                break;
            case 0xc7: /* fqtos */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                break;
            case 0xc8: /* fitod */
                gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                break;
            case 0xc9: /* fstod */
                gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                break;
            case 0xcb: /* fqtod */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                break;
            case 0xcc: /* fitoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                break;
            case 0xcd: /* fstoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                break;
            case 0xce: /* fdtoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                break;
            case 0xd1: /* fstoi */
                gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                break;
            case 0xd2: /* fdtoi */
                gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                break;
            case 0xd3: /* fqtoi */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                break;
#ifdef TARGET_SPARC64
            case 0x2: /* V9 fmovd */
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x3: /* V9 fmovq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_move_Q(rd, rs2);
                break;
            case 0x6: /* V9 fnegd */
                gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                break;
            case 0x7: /* V9 fnegq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                break;
            case 0xa: /* V9 fabsd */
                gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                break;
            case 0xb: /* V9 fabsq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                break;
            case 0x81: /* V9 fstox */
                gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                break;
            case 0x82: /* V9 fdtox */
                gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                break;
            case 0x83: /* V9 fqtox */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                break;
            case 0x84: /* V9 fxtos */
                gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                break;
            case 0x88: /* V9 fxtod */
                gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                break;
            case 0x8c: /* V9 fxtoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                break;
#endif
            default:
                goto illegal_insn;
            }
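            /* Naming convention in the helpers above: the F/D/Q suffixes
               denote 32-bit single, 64-bit double and 128-bit quad
               operands, so e.g. gen_fop_DFF produces a double result
               from two single sources (fsmuld).  */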
        } else if (xop == 0x35) { /* FPU Operations */
#ifdef TARGET_SPARC64
            int cond;
#endif
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_op_clear_ieee_excp_and_FTT();
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            xop = GET_FIELD(insn, 18, 26);
            save_state(dc);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                          \
            do {                                           \
                DisasCompare cmp;                          \
                cond = GET_FIELD_SP(insn, 10, 12);         \
                cpu_src1 = get_src1(dc, insn);             \
                gen_compare_reg(&cmp, cond, cpu_src1);     \
                gen_fmov##sz(dc, &cmp, rd, rs2);           \
                free_compare(&cmp);                        \
            } while (0)

            if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                FMOVR(s);
                break;
            } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                FMOVR(d);
                break;
            } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVR(q);
                break;
            }
#undef FMOVR
#endif
            switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                    \
            do {                                           \
                DisasCompare cmp;                          \
                cond = GET_FIELD_SP(insn, 14, 17);         \
                gen_fcompare(&cmp, fcc, cond);             \
                gen_fmov##sz(dc, &cmp, rd, rs2);           \
                free_compare(&cmp);                        \
            } while (0)

            case 0x001: /* V9 fmovscc %fcc0 */
                FMOVCC(0, s);
                break;
            case 0x002: /* V9 fmovdcc %fcc0 */
                FMOVCC(0, d);
                break;
            case 0x003: /* V9 fmovqcc %fcc0 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVCC(0, q);
                break;
            case 0x041: /* V9 fmovscc %fcc1 */
                FMOVCC(1, s);
                break;
            case 0x042: /* V9 fmovdcc %fcc1 */
                FMOVCC(1, d);
                break;
            case 0x043: /* V9 fmovqcc %fcc1 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVCC(1, q);
                break;
            case 0x081: /* V9 fmovscc %fcc2 */
                FMOVCC(2, s);
                break;
            case 0x082: /* V9 fmovdcc %fcc2 */
                FMOVCC(2, d);
                break;
            case 0x083: /* V9 fmovqcc %fcc2 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVCC(2, q);
                break;
            case 0x0c1: /* V9 fmovscc %fcc3 */
                FMOVCC(3, s);
                break;
            case 0x0c2: /* V9 fmovdcc %fcc3 */
                FMOVCC(3, d);
                break;
            case 0x0c3: /* V9 fmovqcc %fcc3 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVCC(3, q);
                break;
#undef FMOVCC
#define FMOVCC(xcc, sz)                                    \
            do {                                           \
                DisasCompare cmp;                          \
                cond = GET_FIELD_SP(insn, 14, 17);         \
                gen_compare(&cmp, xcc, cond, dc);          \
                gen_fmov##sz(dc, &cmp, rd, rs2);           \
                free_compare(&cmp);                        \
            } while (0)

            case 0x101: /* V9 fmovscc %icc */
                FMOVCC(0, s);
                break;
            case 0x102: /* V9 fmovdcc %icc */
                FMOVCC(0, d);
                break;
            case 0x103: /* V9 fmovqcc %icc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVCC(0, q);
                break;
            case 0x181: /* V9 fmovscc %xcc */
                FMOVCC(1, s);
                break;
            case 0x182: /* V9 fmovdcc %xcc */
                FMOVCC(1, d);
                break;
            case 0x183: /* V9 fmovqcc %xcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                FMOVCC(1, q);
                break;
#undef FMOVCC
#endif
            case 0x51: /* fcmps, V9 %fcc */
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                break;
            case 0x52: /* fcmpd, V9 %fcc */
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                break;
            case 0x53: /* fcmpq, V9 %fcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpq(rd & 3);
                break;
            case 0x55: /* fcmpes, V9 %fcc */
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                break;
            case 0x56: /* fcmped, V9 %fcc */
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                break;
            case 0x57: /* fcmpeq, V9 %fcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpeq(rd & 3);
                break;
            default:
                goto illegal_insn;
            }
        } else if (xop == 0x2) {
            TCGv dst = gen_dest_gpr(dc, rd);
            rs1 = GET_FIELD(insn, 13, 17);
            if (rs1 == 0) {
                /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_movi_tl(dst, simm);
                    gen_store_gpr(dc, rd, dst);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2 == 0) {
                        tcg_gen_movi_tl(dst, 0);
                        gen_store_gpr(dc, rd, dst);
                    } else {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        gen_store_gpr(dc, rd, cpu_src2);
                    }
                }
            } else {
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_ori_tl(dst, cpu_src1, simm);
                    gen_store_gpr(dc, rd, dst);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2 == 0) {
                        /* mov shortcut:  or x, %g0, y -> mov x, y */
                        gen_store_gpr(dc, rd, cpu_src1);
                    } else {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, dst);
                    }
                }
            }
#ifdef TARGET_SPARC64
        } else if (xop == 0x25) { /* sll, V9 sllx */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                } else {
                    tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                cpu_tmp0 = get_temp_tl(dc);
                if (insn & (1 << 12)) {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                } else {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                }
                tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
            }
            gen_store_gpr(dc, rd, cpu_dst);
        } else if (xop == 0x26) { /* srl, V9 srlx */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                } else {
                    tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                    tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                cpu_tmp0 = get_temp_tl(dc);
                if (insn & (1 << 12)) {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                } else {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                    tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
                }
            }
            gen_store_gpr(dc, rd, cpu_dst);
        } else if (xop == 0x27) { /* sra, V9 srax */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                } else {
                    tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                    tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                cpu_tmp0 = get_temp_tl(dc);
                if (insn & (1 << 12)) {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                } else {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                    tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
                }
            }
            gen_store_gpr(dc, rd, cpu_dst);
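        /* In the three V9 shift forms above, instruction bit 12 is the
           "x" (extended) bit: when set the full 64-bit register is
           shifted and the count is masked to six bits (0x3f); when
           clear the 32-bit semantics apply and the count is masked to
           five bits (0x1f), e.g. "srlx %o1, 40, %o0" versus
           "srl %o1, 8, %o0".  */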
#endif
        } else if (xop < 0x36) {
            if (xop < 0x20) {
                cpu_src1 = get_src1(dc, insn);
                cpu_src2 = get_src2(dc, insn);
                switch (xop & ~0x10) {
                case 0x0: /* add */
                    if (xop & 0x10) {
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
                    } else {
                        tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    }
                    break;
                case 0x1: /* and */
                    tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x2: /* or */
                    tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x3: /* xor */
                    tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x4: /* sub */
                    if (xop & 0x10) {
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                        dc->cc_op = CC_OP_SUB;
                    } else {
                        tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                    }
                    break;
                case 0x5: /* andn */
                    tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x6: /* orn */
                    tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x7: /* xorn */
                    tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x8: /* addx, V9 addc */
                    gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                    (xop & 0x10));
                    break;
#ifdef TARGET_SPARC64
                case 0x9: /* V9 mulx */
                    tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
                    break;
#endif
                case 0xa: /* umul */
                    CHECK_IU_FEATURE(dc, MUL);
                    gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0xb: /* smul */
                    CHECK_IU_FEATURE(dc, MUL);
                    gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0xc: /* subx, V9 subc */
                    gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                    (xop & 0x10));
                    break;
#ifdef TARGET_SPARC64
                case 0xd: /* V9 udivx */
                    gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                    break;
#endif
                case 0xe: /* udiv */
                    CHECK_IU_FEATURE(dc, DIV);
                    if (xop & 0x10) {
                        gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                           cpu_src2);
                        dc->cc_op = CC_OP_DIV;
                    } else {
                        gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                        cpu_src2);
                    }
                    break;
                case 0xf: /* sdiv */
                    CHECK_IU_FEATURE(dc, DIV);
                    if (xop & 0x10) {
                        gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                           cpu_src2);
                        dc->cc_op = CC_OP_DIV;
                    } else {
                        gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                        cpu_src2);
                    }
                    break;
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_dst);
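                /* Bit 4 of xop (0x10) selects the condition-code variant
                   of each ALU op (add vs addcc, and vs andcc, ...), which
                   is why the switch above matched on xop & ~0x10 and
                   updated the cpu_cc_* state only when that bit was
                   set.  */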
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_src2 = get_src2(dc, insn);
                switch (xop) {
                case 0x20: /* taddcc */
                    gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                    dc->cc_op = CC_OP_TADD;
                    break;
                case 0x21: /* tsubcc */
                    gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                    dc->cc_op = CC_OP_TSUB;
                    break;
                case 0x22: /* taddcctv */
                    gen_helper_taddcctv(cpu_dst, cpu_env,
                                        cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    dc->cc_op = CC_OP_TADDTV;
                    break;
                case 0x23: /* tsubcctv */
                    gen_helper_tsubcctv(cpu_dst, cpu_env,
                                        cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    dc->cc_op = CC_OP_TSUBTV;
                    break;
                case 0x24: /* mulscc */
                    update_psr(dc);
                    gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                    dc->cc_op = CC_OP_ADD;
                    break;
#ifndef TARGET_SPARC64
                case 0x25: /* sll */
                    if (IS_IMM) { /* immediate */
                        simm = GET_FIELDs(insn, 20, 31);
                        tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                    } else { /* register */
                        cpu_tmp0 = get_temp_tl(dc);
                        tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x26: /* srl */
                    if (IS_IMM) { /* immediate */
                        simm = GET_FIELDs(insn, 20, 31);
                        tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                    } else { /* register */
                        cpu_tmp0 = get_temp_tl(dc);
                        tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x27: /* sra */
                    if (IS_IMM) { /* immediate */
                        simm = GET_FIELDs(insn, 20, 31);
                        tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                    } else { /* register */
                        cpu_tmp0 = get_temp_tl(dc);
                        tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
#endif
                case 0x30:
                {
                    cpu_tmp0 = get_temp_tl(dc);
                    switch (rd) {
                    case 0: /* wry */
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
                        break;
#ifndef TARGET_SPARC64
                    case 0x01 ... 0x0f: /* undefined in the SPARCv8
                                           manual, nop on the
                                           microSPARC II */
                    case 0x10 ... 0x1f: /* implementation-dependent in
                                           the SPARCv8 manual, nop on
                                           the microSPARC II */
                        if ((rd == 0x13) && (dc->def->features &
                                             CPU_FEATURE_POWERDOWN)) {
                            /* LEON3 power-down */
                            save_state(dc);
                            gen_helper_power_down(cpu_env);
                        }
                        break;
#else
                    case 0x2: /* V9 wrccr */
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        gen_helper_wrccr(cpu_env, cpu_tmp0);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                        dc->cc_op = CC_OP_FLAGS;
                        break;
                    case 0x3: /* V9 wrasi */
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                        tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
                        break;
                    case 0x6: /* V9 wrfprs */
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
                        save_state(dc);
                        gen_op_next_insn();
                        tcg_gen_exit_tb(0);
                        dc->is_br = 1;
                        break;
                    case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                        if (supervisor(dc)) {
                            ; // XXX
                        }
#endif
                        break;
                    case 0x13: /* Graphics Status */
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
                        break;
                    case 0x14: /* Softint set */
                        if (!supervisor(dc))
                            goto illegal_insn;
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        gen_helper_set_softint(cpu_env, cpu_tmp0);
                        break;
                    case 0x15: /* Softint clear */
                        if (!supervisor(dc))
                            goto illegal_insn;
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        gen_helper_clear_softint(cpu_env, cpu_tmp0);
                        break;
                    case 0x16: /* Softint write */
                        if (!supervisor(dc))
                            goto illegal_insn;
                        tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        gen_helper_write_softint(cpu_env, cpu_tmp0);
                        break;
                    case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                        if (!supervisor(dc))
                            goto illegal_insn;
#endif
                        {
                            TCGv_ptr r_tickptr;

                            tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                                           cpu_src2);
                            r_tickptr = tcg_temp_new_ptr();
                            tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                           offsetof(CPUSPARCState, tick));
                            gen_helper_tick_set_limit(r_tickptr,
                                                      cpu_tick_cmpr);
                            tcg_temp_free_ptr(r_tickptr);
                        }
                        break;
                    case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                        if (!supervisor(dc))
                            goto illegal_insn;
#endif
                        {
                            TCGv_ptr r_tickptr;

                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                                           cpu_src2);
                            r_tickptr = tcg_temp_new_ptr();
                            tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                           offsetof(CPUSPARCState, stick));
                            gen_helper_tick_set_count(r_tickptr,
                                                      cpu_tmp0);
                            tcg_temp_free_ptr(r_tickptr);
                        }
                        break;
                    case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                        if (!supervisor(dc))
                            goto illegal_insn;
#endif
                        {
                            TCGv_ptr r_tickptr;

                            tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                                           cpu_src2);
                            r_tickptr = tcg_temp_new_ptr();
                            tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                           offsetof(CPUSPARCState, stick));
                            gen_helper_tick_set_limit(r_tickptr,
                                                      cpu_stick_cmpr);
                            tcg_temp_free_ptr(r_tickptr);
                        }
                        break;

                    case 0x10: /* Performance Control */
                    case 0x11: /* Performance Instrumentation Counter */
                    case 0x12: /* Dispatch Control */
#endif
                    default:
                        goto illegal_insn;
                    }
                }
                break;
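                /* WRY/WRASR architecturally write "r[rs1] xor reg_or_imm"
                   rather than a plain move, which is why every case above
                   starts by xor-ing the two source operands into a
                   temporary.  */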
#if !defined(CONFIG_USER_ONLY)
                case 0x31: /* wrpsr, V9 saved, restored */
                {
                    if (!supervisor(dc))
                        goto priv_insn;
#ifdef TARGET_SPARC64
                    switch (rd) {
                    case 0:
                        gen_helper_saved(cpu_env);
                        break;
                    case 1:
                        gen_helper_restored(cpu_env);
                        break;
                    case 2: /* UA2005 allclean */
                    case 3: /* UA2005 otherw */
                    case 4: /* UA2005 normalw */
                    case 5: /* UA2005 invalw */
                        // XXX
                    default:
                        goto illegal_insn;
                    }
#else
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    gen_helper_wrpsr(cpu_env, cpu_tmp0);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                    dc->cc_op = CC_OP_FLAGS;
                    save_state(dc);
                    gen_op_next_insn();
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
#endif
                }
                break;
                case 0x32: /* wrwim, V9 wrpr */
                {
                    if (!supervisor(dc))
                        goto priv_insn;
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                    switch (rd) {
                    case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                        break;
                    case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                        break;
                    case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state,
                                               tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                        break;
                    case 3: // tt
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                        offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                        break;
                    case 4: // tick
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_set_count(r_tickptr,
                                                  cpu_tmp0);
                        tcg_temp_free_ptr(r_tickptr);
                    }
                        break;
                    case 5: // tba
                        tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                        break;
                    case 6: // pstate
                        save_state(dc);
                        gen_helper_wrpstate(cpu_env, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        break;
                    case 7: // tl
                        save_state(dc);
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState, tl));
                        dc->npc = DYNAMIC_PC;
                        break;
                    case 8: // pil
                        gen_helper_wrpil(cpu_env, cpu_tmp0);
                        break;
                    case 9: // cwp
                        gen_helper_wrcwp(cpu_env, cpu_tmp0);
                        break;
                    case 10: // cansave
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState,
                                                 cansave));
                        break;
                    case 11: // canrestore
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState,
                                                 canrestore));
                        break;
                    case 12: // cleanwin
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState,
                                                 cleanwin));
                        break;
                    case 13: // otherwin
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState,
                                                 otherwin));
                        break;
                    case 14: // wstate
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState,
                                                 wstate));
                        break;
                    case 16: // UA2005 gl
                        CHECK_IU_FEATURE(dc, GL);
                        tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                        offsetof(CPUSPARCState, gl));
                        break;
                    case 26: // UA2005 strand status
                        CHECK_IU_FEATURE(dc, HYPV);
                        if (!hypervisor(dc))
                            goto priv_insn;
                        tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                        break;
                    default:
                        goto illegal_insn;
                    }
#else
                    tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                    if (dc->def->nwindows != 32) {
                        tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                        (1 << dc->def->nwindows) - 1);
                    }
#endif
                }
                break;
                case 0x33: /* wrtbr, UA2005 wrhpr */
                {
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
                    tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    switch (rd) {
                    case 0: // hpstate
                        // XXX gen_op_wrhpstate();
                        save_state(dc);
                        gen_op_next_insn();
                        tcg_gen_exit_tb(0);
                        dc->is_br = 1;
                        break;
                    case 1: // htstate
                        // XXX gen_op_wrhtstate();
                        break;
                    case 3: // hintp
                        tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                        break;
                    case 5: // htba
                        tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                        break;
                    case 31: // hstick_cmpr
                    {
                        TCGv_ptr r_tickptr;

                        tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, hstick));
                        gen_helper_tick_set_limit(r_tickptr,
                                                  cpu_hstick_cmpr);
                        tcg_temp_free_ptr(r_tickptr);
                    }
                        break;
                    case 6: // hver readonly
                    default:
                        goto illegal_insn;
                    }
#endif
                }
                break;
#ifdef TARGET_SPARC64
                case 0x2c: /* V9 movcc */
                {
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    int cond = GET_FIELD_SP(insn, 14, 17);
                    DisasCompare cmp;
                    TCGv dst;

                    if (insn & (1 << 18)) {
                        if (cc == 0) {
                            gen_compare(&cmp, 0, cond, dc);
                        } else if (cc == 2) {
                            gen_compare(&cmp, 1, cond, dc);
                        } else {
                            goto illegal_insn;
                        }
                    } else {
                        gen_fcompare(&cmp, cc, cond);
                    }

                    /* The get_src2 above loaded the normal 13-bit
                       immediate field, not the 11-bit field we have
                       in movcc.  But it did handle the reg case.  */
                    if (IS_IMM) {
                        simm = GET_FIELD_SPs(insn, 0, 10);
                        tcg_gen_movi_tl(cpu_src2, simm);
                    }

                    dst = gen_load_gpr(dc, rd);
                    tcg_gen_movcond_tl(cmp.cond, dst,
                                       cmp.c1, cmp.c2,
                                       cpu_src2, dst);
                    free_compare(&cmp);
                    gen_store_gpr(dc, rd, dst);
                    break;
                }
                case 0x2d: /* V9 sdivx */
                    gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x2e: /* V9 popc */
                    gen_helper_popc(cpu_dst, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x2f: /* V9 movr */
                {
                    int cond = GET_FIELD_SP(insn, 10, 12);
                    DisasCompare cmp;
                    TCGv dst;

                    gen_compare_reg(&cmp, cond, cpu_src1);

                    /* The get_src2 above loaded the normal 13-bit
                       immediate field, not the 10-bit field we have
                       in movr.  But it did handle the reg case.  */
                    if (IS_IMM) {
                        simm = GET_FIELD_SPs(insn, 0, 9);
                        tcg_gen_movi_tl(cpu_src2, simm);
                    }

                    dst = gen_load_gpr(dc, rd);
                    tcg_gen_movcond_tl(cmp.cond, dst,
                                       cmp.c1, cmp.c2,
                                       cpu_src2, dst);
                    free_compare(&cmp);
                    gen_store_gpr(dc, rd, dst);
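                    /* tcg_gen_movcond_tl(cond, dst, c1, c2, v1, v2)
                       computes dst = cond(c1, c2) ? v1 : v2, so loading
                       the old rd value and passing it as v2 makes the
                       conditional move a no-op when the condition
                       fails.  */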
                    break;
                }
#endif
                default:
                    goto illegal_insn;
                }
            }
        } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
            int opf = GET_FIELD_SP(insn, 5, 13);
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }

            switch (opf) {
            case 0x000: /* VIS I edge8cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x001: /* VIS II edge8n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x002: /* VIS I edge8lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x003: /* VIS II edge8ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x004: /* VIS I edge16cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x005: /* VIS II edge16n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x006: /* VIS I edge16lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x007: /* VIS II edge16ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x008: /* VIS I edge32cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x009: /* VIS II edge32n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x00a: /* VIS I edge32lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x00b: /* VIS II edge32ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
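            /* Reading the gen_edge call sites above: the trailing
               arguments appear to be the element width (8/16/32),
               whether the cc-setting form was decoded, and whether the
               little-endian ("l") mask variant is wanted.  */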
            case 0x010: /* VIS I array8 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x012: /* VIS I array16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x014: /* VIS I array32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x018: /* VIS I alignaddr */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x01a: /* VIS I alignaddrl */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x019: /* VIS II bmask */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x020: /* VIS I fcmple16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x022: /* VIS I fcmpne16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x024: /* VIS I fcmple32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x026: /* VIS I fcmpne32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x028: /* VIS I fcmpgt16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x02a: /* VIS I fcmpeq16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x02c: /* VIS I fcmpgt32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x02e: /* VIS I fcmpeq32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x031: /* VIS I fmul8x16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                break;
            case 0x033: /* VIS I fmul8x16au */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                break;
            case 0x035: /* VIS I fmul8x16al */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                break;
            case 0x036: /* VIS I fmul8sux16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                break;
            case 0x037: /* VIS I fmul8ulx16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                break;
            case 0x038: /* VIS I fmuld8sux16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                break;
            case 0x039: /* VIS I fmuld8ulx16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                break;
            case 0x03a: /* VIS I fpack32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                break;
            case 0x03b: /* VIS I fpack16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x03d: /* VIS I fpackfix */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x03e: /* VIS I pdist */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                break;
            case 0x048: /* VIS I faligndata */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                break;
            case 0x04b: /* VIS I fpmerge */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                break;
            case 0x04c: /* VIS II bshuffle */
                CHECK_FPU_FEATURE(dc, VIS2);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                break;
            case 0x04d: /* VIS I fexpand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                break;
            case 0x050: /* VIS I fpadd16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                break;
            case 0x051: /* VIS I fpadd16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                break;
            case 0x052: /* VIS I fpadd32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                break;
            case 0x053: /* VIS I fpadd32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                break;
            case 0x054: /* VIS I fpsub16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                break;
            case 0x055: /* VIS I fpsub16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                break;
            case 0x056: /* VIS I fpsub32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                break;
            case 0x057: /* VIS I fpsub32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                break;
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                break;
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                break;
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                break;
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                break;
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                break;
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                break;
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                break;
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                break;
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                break;
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                break;
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                break;
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                break;
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                break;
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                break;
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                break;
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                break;
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                break;
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                break;
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                break;
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                break;
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                break;
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                break;
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                break;
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                break;
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
                // XXX
                goto illegal_insn;
            default:
                goto illegal_insn;
            }
#else
            goto ncp_insn;
#endif
        } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
            goto illegal_insn;
#else
            goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            TCGv_i32 r_const;

            save_state(dc);
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            gen_helper_restore(cpu_env);
            gen_mov_pc_npc(dc);
            r_const = tcg_const_i32(3);
            gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
            tcg_temp_free_i32(r_const);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC;
            goto jmp_insn;
#endif
        } else {
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {       /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            switch (xop) {
            case 0x38:  /* jmpl */
            {
                TCGv t;
                TCGv_i32 r_const;

                t = gen_dest_gpr(dc, rd);
                tcg_gen_movi_tl(t, dc->pc);
                gen_store_gpr(dc, rd, t);
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                gen_address_mask(dc, cpu_tmp0);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
            }
            goto jmp_insn;
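            /* Writing cpu_npc and setting dc->npc = DYNAMIC_PC records
               an indirect target: the next PC is only known at run
               time, so the translator stops treating npc as a
               compile-time constant from here on.  */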
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:  /* rett, V9 return */
            {
                TCGv_i32 r_const;

                if (!supervisor(dc))
                    goto priv_insn;
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                gen_helper_rett(cpu_env);
            }
            goto jmp_insn;
#endif
            case 0x3b: /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                    goto unimp_flush;
                /* nop */
                break;
            case 0x3c:  /* save */
                save_state(dc);
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d:  /* restore */
                save_state(dc);
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:  /* V9 done/retry */
            {
                switch (rd) {
                case 0:
                    if (!supervisor(dc))
                        goto priv_insn;
                    dc->npc = DYNAMIC_PC;
                    dc->pc = DYNAMIC_PC;
                    gen_helper_done(cpu_env);
                    goto jmp_insn;
                case 1:
                    if (!supervisor(dc))
                        goto priv_insn;
                    dc->npc = DYNAMIC_PC;
                    dc->pc = DYNAMIC_PC;
                    gen_helper_retry(cpu_env);
                    goto jmp_insn;
                default:
                    goto illegal_insn;
                }
            }
            break;
#endif
            default:
                goto illegal_insn;
            }
        }
        break;
    }
    case 3: /* load/store instructions */
    {
        unsigned int xop = GET_FIELD(insn, 7, 12);
        /* ??? gen_address_mask prevents us from using a source
           register directly.  Always generate a temporary.  */
        TCGv cpu_addr = get_temp_tl(dc);

        tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
        if (xop == 0x3c || xop == 0x3e) {
            /* V9 casa/casxa : no offset */
        } else if (IS_IMM) {    /* immediate */
            simm = GET_FIELDs(insn, 19, 31);
            if (simm != 0) {
                tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
            }
        } else {                /* register */
            rs2 = GET_FIELD(insn, 27, 31);
            if (rs2 != 0) {
                tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
            }
        }
        if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
            (xop > 0x17 && xop <= 0x1d) ||
            (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
            TCGv cpu_val = gen_dest_gpr(dc, rd);

            switch (xop) {
            case 0x0: /* ld, V9 lduw, load unsigned word */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x1: /* ldub, load unsigned byte */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x2: /* lduh, load unsigned halfword */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x3: /* ldd, load double word */
                if (rd & 1)
                    goto illegal_insn;
                else {
                    TCGv_i32 r_const;
                    TCGv_i64 t64;

                    save_state(dc);
                    r_const = tcg_const_i32(7);
                    /* XXX remove alignment check */
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_address_mask(dc, cpu_addr);
                    t64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_i64_tl(cpu_val, t64);
                    tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    gen_store_gpr(dc, rd + 1, cpu_val);
                    tcg_gen_shri_i64(t64, t64, 32);
                    tcg_gen_trunc_i64_tl(cpu_val, t64);
                    tcg_temp_free_i64(t64);
                    tcg_gen_ext32u_tl(cpu_val, cpu_val);
                }
                break;
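            /* ldd loads one aligned 64-bit value and spreads it over an
               even/odd register pair: the low word is stored to rd + 1
               immediately, while the high word is left in cpu_val for
               the common gen_store_gpr(dc, rd, ...) done after this
               switch.  */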
            case 0x9: /* ldsb, load signed byte */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0xa: /* ldsh, load signed halfword */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0xd: /* ldstub -- XXX: should be atomically */
            {
                TCGv r_const;

                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                r_const = tcg_const_tl(0xff);
                tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                tcg_temp_free(r_const);
            }
                break;
            case 0x0f:
                /* swap, swap register with memory. Also atomically */
            {
                TCGv t0 = get_temp_tl(dc);
                CHECK_IU_FEATURE(dc, SWAP);
                cpu_src1 = gen_load_gpr(dc, rd);
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                tcg_gen_mov_tl(cpu_val, t0);
            }
                break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x10: /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                break;
            case 0x11: /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                break;
            case 0x12: /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                break;
            case 0x13: /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                if (rd & 1)
                    goto illegal_insn;
                save_state(dc);
                gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                goto skip_move;
            case 0x19: /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                break;
            case 0x1a: /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                break;
            case 0x1d: /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_ldstub_asi(cpu_val, cpu_addr, insn);
                break;
            case 0x1f: /* swapa, swap reg with alt. memory. Also
                          atomically */
                CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                cpu_src1 = gen_load_gpr(dc, rd);
                gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                break;
#ifndef TARGET_SPARC64
            case 0x30: /* ldc */
            case 0x31: /* ldcsr */
            case 0x33: /* lddc */
                goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
            case 0x08: /* V9 ldsw */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x0b: /* V9 ldx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x18: /* V9 ldswa */
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                break;
            case 0x1b: /* V9 ldxa */
                save_state(dc);
                gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                break;
            case 0x2d: /* V9 prefetch, no effect */
                goto skip_move;
            case 0x30: /* V9 ldfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                gen_ldf_asi(cpu_addr, insn, 4, rd);
                gen_update_fprs_dirty(rd);
                goto skip_move;
            case 0x33: /* V9 lddfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                gen_update_fprs_dirty(DFPREG(rd));
                goto skip_move;
            case 0x3d: /* V9 prefetcha, no effect */
                goto skip_move;
            case 0x32: /* V9 ldqfa */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                gen_update_fprs_dirty(QFPREG(rd));
                goto skip_move;
#endif
            default:
                goto illegal_insn;
            }
            gen_store_gpr(dc, rd, cpu_val);
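            /* In the gen_ld_asi calls above the trailing integer
               arguments are the access size in bytes and a sign flag,
               e.g. (insn, 2, 1) for a signed halfword alternate-space
               load.  */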
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        skip_move: ;
#endif
        } else if (xop >= 0x20 && xop < 0x24) {
            TCGv t0;

            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            save_state(dc);
            switch (xop) {
            case 0x20: /* ldf, load fpreg */
                gen_address_mask(dc, cpu_addr);
                t0 = get_temp_tl(dc);
                tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                gen_address_mask(dc, cpu_addr);
                if (rd == 1) {
                    TCGv_i64 t64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                    gen_helper_ldxfsr(cpu_env, t64);
                    tcg_temp_free_i64(t64);
                    break;
                }
#endif
                cpu_dst_32 = get_temp_i32(dc);
                t0 = get_temp_tl(dc);
                tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                gen_helper_ldfsr(cpu_env, cpu_dst_32);
                break;
            case 0x22: /* ldqf, load quad fpreg */
            {
                TCGv_i32 r_const;

                CHECK_FPU_FEATURE(dc, FLOAT128);
                r_const = tcg_const_i32(dc->mem_idx);
                gen_address_mask(dc, cpu_addr);
                gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                tcg_temp_free_i32(r_const);
                gen_op_store_QT0_fpr(QFPREG(rd));
                gen_update_fprs_dirty(QFPREG(rd));
            }
                break;
            case 0x23: /* lddf, load double fpreg */
                gen_address_mask(dc, cpu_addr);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            default:
                goto illegal_insn;
            }
        } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                   xop == 0xe || xop == 0x1e) {
            TCGv cpu_val = gen_load_gpr(dc, rd);

            switch (xop) {
            case 0x4: /* st, store word */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x5: /* stb, store byte */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x6: /* sth, store halfword */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x7: /* std, store double word */
                if (rd & 1)
                    goto illegal_insn;
                else {
                    TCGv_i32 r_const;
                    TCGv_i64 t64;
                    TCGv lo;

                    save_state(dc);
                    gen_address_mask(dc, cpu_addr);
                    r_const = tcg_const_i32(7);
                    /* XXX remove alignment check */
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    lo = gen_load_gpr(dc, rd + 1);

                    t64 = tcg_temp_new_i64();
                    tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                    tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                    tcg_temp_free_i64(t64);
                }
                break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 4);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 1);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 2);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                if (rd & 1)
                    goto illegal_insn;
                else {
                    save_state(dc);
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                }
                break;
#endif
#ifdef TARGET_SPARC64
            case 0x0e: /* V9 stx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x1e: /* V9 stxa */
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 8);
                dc->npc = DYNAMIC_PC;
                break;
#endif
            default:
                goto illegal_insn;
            }
4996 } else if (xop
> 0x23 && xop
< 0x28) {
4997 if (gen_trap_ifnofpu(dc
)) {
5002 case 0x24: /* stf, store fpreg */
5004 TCGv t
= get_temp_tl(dc
);
5005 gen_address_mask(dc
, cpu_addr
);
5006 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5007 tcg_gen_ext_i32_tl(t
, cpu_src1_32
);
5008 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
        case 0x25: /* stfsr, V9 stxfsr */
            {
                TCGv t = get_temp_tl(dc);

                tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                gen_address_mask(dc, cpu_addr);
                if (rd == 1) {
                    tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                    break;
                }
#endif
                tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
            }
            break;
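        /* xop 0x26 decodes differently by architecture: stqf on V9,
           the privileged stdfq on 32-bit SPARC. */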
        case 0x26:
#ifdef TARGET_SPARC64
            /* V9 stqf, store quad fpreg */
            {
                TCGv_i32 r_const;

                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rd));
                r_const = tcg_const_i32(dc->mem_idx);
                gen_address_mask(dc, cpu_addr);
                gen_helper_stqf(cpu_env, cpu_addr, r_const);
                tcg_temp_free_i32(r_const);
            }
            break;
#else /* !TARGET_SPARC64 */
            /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
            goto illegal_insn;
#else
            if (!supervisor(dc))
                goto priv_insn;
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            goto nfq_insn;
#endif
#endif
        case 0x27: /* stdf, store double fpreg */
            gen_address_mask(dc, cpu_addr);
            cpu_src1_64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
            break;
        default:
            goto illegal_insn;
        }
    } else if (xop > 0x33 && xop < 0x3f) {
        save_state(dc);
        switch (xop) {
#ifdef TARGET_SPARC64
        case 0x34: /* V9 stfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(cpu_addr, insn, 4, rd);
            break;
        case 0x36: /* V9 stqfa */
            {
                TCGv_i32 r_const;

                CHECK_FPU_FEATURE(dc, FLOAT128);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                r_const = tcg_const_i32(7);
                gen_helper_check_align(cpu_env, cpu_addr, r_const);
                tcg_temp_free_i32(r_const);
                gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
            }
            break;
        case 0x37: /* V9 stdfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
            break;
        case 0x3e: /* V9 casxa */
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
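        /* On 32-bit SPARC the same opcodes are coprocessor stores,
           which this translator does not implement. */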
#else
        case 0x34: /* stc */
        case 0x35: /* stcsr */
        case 0x36: /* stdcq */
        case 0x37: /* stdc */
            goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
            CHECK_IU_FEATURE(dc, CASA);
            if (IS_IMM) {
                goto illegal_insn;
            }
            if (!supervisor(dc)) {
                goto priv_insn;
            }
#endif
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
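        /* casa above and casxa both compare the memory word at the
           ASI address with %rs2 and swap in %rd on a match; %rd
           always receives the old memory value. */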
#endif
        default:
            goto illegal_insn;
        }
    } else {
        goto illegal_insn;
    }
    }
    break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
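    /* Exception exits: each label below raises the corresponding trap
       and marks the TB as finished before falling through to egress. */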
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
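/* Translate one basic block of guest code into TCG ops.  When 'spc'
   is true, per-op PC/NPC data is also recorded so that a faulting
   instruction can be located again (see restore_state_to_opc). */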
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(cpu_env);
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
5267 qemu_log("Search PC...\n");
5268 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
5272 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5273 tcg_ctx
.gen_opc_pc
[lj
] = dc
->pc
;
5274 gen_opc_npc
[lj
] = dc
->npc
;
5275 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
5276 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
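        /* With icount, the final instruction of a TB may do I/O and
           must be bracketed by gen_io_start()/gen_io_end(). */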
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);
        num_insns++;

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((tcg_ctx.gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (spc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        gen_opc_jump_pc[0] = dc->jump_pc[0];
        gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}
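/* Register the fixed TCG globals that back the CPU state fields.
   The 'inited' guard ensures this runs only once. */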
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
                                         "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }
    }
}
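/* Map a TCG op index back to guest PC/NPC state after a fault.  The
   recorded npc slot reuses the DYNAMIC_PC/JUMP_PC encoding (1 and 2). */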
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong npc;

    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    npc = gen_opc_npc[pc_pos];
    if (npc == 1) {
        /* dynamic NPC: already stored */
    } else if (npc == 2) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = gen_opc_jump_pc[0];
        } else {
            env->npc = gen_opc_jump_pc[1];
        }
    } else {
        env->npc = npc;
    }
}