4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "disas/disas.h"
29 #include "exec/helper-proto.h"
31 #include "exec/cpu_ldst.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
40 #define DYNAMIC_PC 1 /* dynamic pc value */
41 #define JUMP_PC 2 /* dynamic pc value which takes only two values
42 according to jump_pc[T2] */
44 /* global register indexes */
45 static TCGv_ptr cpu_env
, cpu_regwptr
;
46 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
47 static TCGv_i32 cpu_cc_op
;
48 static TCGv_i32 cpu_psr
;
49 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
, cpu_gregs
[8];
51 #ifndef CONFIG_USER_ONLY
56 static TCGv_i32 cpu_xcc
, cpu_asi
, cpu_fprs
;
58 static TCGv cpu_tick_cmpr
, cpu_stick_cmpr
, cpu_hstick_cmpr
;
59 static TCGv cpu_hintp
, cpu_htba
, cpu_hver
, cpu_ssr
, cpu_ver
;
60 static TCGv_i32 cpu_softint
;
64 /* Floating point registers */
65 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
67 static target_ulong gen_opc_npc
[OPC_BUF_SIZE
];
68 static target_ulong gen_opc_jump_pc
[2];
70 #include "exec/gen-icount.h"
72 typedef struct DisasContext
{
73 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
74 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
75 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
79 int address_mask_32bit
;
81 uint32_t cc_op
; /* current CC operation */
82 struct TranslationBlock
*tb
;
97 // This function uses non-native bit order
98 #define GET_FIELD(X, FROM, TO) \
99 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
101 // This function uses the order in the manuals, i.e. bit 0 is 2^0
102 #define GET_FIELD_SP(X, FROM, TO) \
103 GET_FIELD(X, 31 - (TO), 31 - (FROM))
105 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
106 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
108 #ifdef TARGET_SPARC64
109 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
110 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
112 #define DFPREG(r) (r & 0x1e)
113 #define QFPREG(r) (r & 0x1c)
116 #define UA2005_HTRAP_MASK 0xff
117 #define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low LEN bits of X to a full 32-bit signed value.
 * Used by GET_FIELDs/GET_FIELD_SPs to decode signed immediate fields.
 */
static int sign_extend(int x, int len)
{
    /* Move the field's sign bit to bit 31, then arithmetic-shift back
       down so it propagates through the upper bits.  The left shift is
       done on uint32_t to avoid signed-overflow UB. */
    len = 32 - len;
    return (int)((uint32_t)x << len) >> len;
}
125 #define IS_IMM (insn & (1<<13))
127 static inline TCGv_i32
get_temp_i32(DisasContext
*dc
)
130 assert(dc
->n_t32
< ARRAY_SIZE(dc
->t32
));
131 dc
->t32
[dc
->n_t32
++] = t
= tcg_temp_new_i32();
135 static inline TCGv
get_temp_tl(DisasContext
*dc
)
138 assert(dc
->n_ttl
< ARRAY_SIZE(dc
->ttl
));
139 dc
->ttl
[dc
->n_ttl
++] = t
= tcg_temp_new();
/* Record that fp register RD has been written.  On sparc64 this sets the
   lower (rd < 32 -> bit 0) or upper (rd >= 32 -> bit 1) dirty flag in
   FPRS; pre-v9 targets have no FPRS, so this compiles to nothing. */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
150 /* floating point registers moves */
151 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
153 #if TCG_TARGET_REG_BITS == 32
155 return TCGV_LOW(cpu_fpr
[src
/ 2]);
157 return TCGV_HIGH(cpu_fpr
[src
/ 2]);
161 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr
[src
/ 2]));
163 TCGv_i32 ret
= get_temp_i32(dc
);
164 TCGv_i64 t
= tcg_temp_new_i64();
166 tcg_gen_shri_i64(t
, cpu_fpr
[src
/ 2], 32);
167 tcg_gen_extrl_i64_i32(ret
, t
);
168 tcg_temp_free_i64(t
);
175 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
177 #if TCG_TARGET_REG_BITS == 32
179 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr
[dst
/ 2]), v
);
181 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr
[dst
/ 2]), v
);
184 TCGv_i64 t
= MAKE_TCGV_I64(GET_TCGV_I32(v
));
185 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
186 (dst
& 1 ? 0 : 32), 32);
188 gen_update_fprs_dirty(dst
);
191 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
193 return get_temp_i32(dc
);
196 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
199 return cpu_fpr
[src
/ 2];
202 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
205 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
206 gen_update_fprs_dirty(dst
);
209 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
211 return cpu_fpr
[DFPREG(dst
) / 2];
214 static void gen_op_load_fpr_QT0(unsigned int src
)
216 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
217 offsetof(CPU_QuadU
, ll
.upper
));
218 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
219 offsetof(CPU_QuadU
, ll
.lower
));
222 static void gen_op_load_fpr_QT1(unsigned int src
)
224 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
225 offsetof(CPU_QuadU
, ll
.upper
));
226 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
227 offsetof(CPU_QuadU
, ll
.lower
));
230 static void gen_op_store_QT0_fpr(unsigned int dst
)
232 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
233 offsetof(CPU_QuadU
, ll
.upper
));
234 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
235 offsetof(CPU_QuadU
, ll
.lower
));
238 #ifdef TARGET_SPARC64
239 static void gen_move_Q(unsigned int rd
, unsigned int rs
)
244 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
245 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
246 gen_update_fprs_dirty(rd
);
251 #ifdef CONFIG_USER_ONLY
252 #define supervisor(dc) 0
253 #ifdef TARGET_SPARC64
254 #define hypervisor(dc) 0
257 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
258 #ifdef TARGET_SPARC64
259 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
264 #ifdef TARGET_SPARC64
266 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
268 #define AM_CHECK(dc) (1)
272 static inline void gen_address_mask(DisasContext
*dc
, TCGv addr
)
274 #ifdef TARGET_SPARC64
276 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
280 static inline TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
282 if (reg
== 0 || reg
>= 8) {
283 TCGv t
= get_temp_tl(dc
);
285 tcg_gen_movi_tl(t
, 0);
287 tcg_gen_ld_tl(t
, cpu_regwptr
, (reg
- 8) * sizeof(target_ulong
));
291 return cpu_gregs
[reg
];
295 static inline void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
299 tcg_gen_mov_tl(cpu_gregs
[reg
], v
);
301 tcg_gen_st_tl(v
, cpu_regwptr
, (reg
- 8) * sizeof(target_ulong
));
306 static inline TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
308 if (reg
== 0 || reg
>= 8) {
309 return get_temp_tl(dc
);
311 return cpu_gregs
[reg
];
315 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
,
316 target_ulong pc
, target_ulong npc
)
318 TranslationBlock
*tb
;
321 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
322 (npc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
324 /* jump to same page: we can use a direct jump */
325 tcg_gen_goto_tb(tb_num
);
326 tcg_gen_movi_tl(cpu_pc
, pc
);
327 tcg_gen_movi_tl(cpu_npc
, npc
);
328 tcg_gen_exit_tb((uintptr_t)tb
+ tb_num
);
330 /* jump to another page: currently not optimized */
331 tcg_gen_movi_tl(cpu_pc
, pc
);
332 tcg_gen_movi_tl(cpu_npc
, npc
);
338 static inline void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
340 tcg_gen_extu_i32_tl(reg
, src
);
341 tcg_gen_shri_tl(reg
, reg
, PSR_NEG_SHIFT
);
342 tcg_gen_andi_tl(reg
, reg
, 0x1);
345 static inline void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
347 tcg_gen_extu_i32_tl(reg
, src
);
348 tcg_gen_shri_tl(reg
, reg
, PSR_ZERO_SHIFT
);
349 tcg_gen_andi_tl(reg
, reg
, 0x1);
352 static inline void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
354 tcg_gen_extu_i32_tl(reg
, src
);
355 tcg_gen_shri_tl(reg
, reg
, PSR_OVF_SHIFT
);
356 tcg_gen_andi_tl(reg
, reg
, 0x1);
359 static inline void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
361 tcg_gen_extu_i32_tl(reg
, src
);
362 tcg_gen_shri_tl(reg
, reg
, PSR_CARRY_SHIFT
);
363 tcg_gen_andi_tl(reg
, reg
, 0x1);
366 static inline void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
368 tcg_gen_mov_tl(cpu_cc_src
, src1
);
369 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
370 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
371 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
374 static TCGv_i32
gen_add32_carry32(void)
376 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
378 /* Carry is computed from a previous add: (dst < src) */
379 #if TARGET_LONG_BITS == 64
380 cc_src1_32
= tcg_temp_new_i32();
381 cc_src2_32
= tcg_temp_new_i32();
382 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
383 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
385 cc_src1_32
= cpu_cc_dst
;
386 cc_src2_32
= cpu_cc_src
;
389 carry_32
= tcg_temp_new_i32();
390 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
392 #if TARGET_LONG_BITS == 64
393 tcg_temp_free_i32(cc_src1_32
);
394 tcg_temp_free_i32(cc_src2_32
);
400 static TCGv_i32
gen_sub32_carry32(void)
402 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
404 /* Carry is computed from a previous borrow: (src1 < src2) */
405 #if TARGET_LONG_BITS == 64
406 cc_src1_32
= tcg_temp_new_i32();
407 cc_src2_32
= tcg_temp_new_i32();
408 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
409 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
411 cc_src1_32
= cpu_cc_src
;
412 cc_src2_32
= cpu_cc_src2
;
415 carry_32
= tcg_temp_new_i32();
416 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
418 #if TARGET_LONG_BITS == 64
419 tcg_temp_free_i32(cc_src1_32
);
420 tcg_temp_free_i32(cc_src2_32
);
426 static void gen_op_addx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
427 TCGv src2
, int update_cc
)
435 /* Carry is known to be zero. Fall back to plain ADD. */
437 gen_op_add_cc(dst
, src1
, src2
);
439 tcg_gen_add_tl(dst
, src1
, src2
);
446 if (TARGET_LONG_BITS
== 32) {
447 /* We can re-use the host's hardware carry generation by using
448 an ADD2 opcode. We discard the low part of the output.
449 Ideally we'd combine this operation with the add that
450 generated the carry in the first place. */
451 carry
= tcg_temp_new();
452 tcg_gen_add2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
453 tcg_temp_free(carry
);
456 carry_32
= gen_add32_carry32();
462 carry_32
= gen_sub32_carry32();
466 /* We need external help to produce the carry. */
467 carry_32
= tcg_temp_new_i32();
468 gen_helper_compute_C_icc(carry_32
, cpu_env
);
472 #if TARGET_LONG_BITS == 64
473 carry
= tcg_temp_new();
474 tcg_gen_extu_i32_i64(carry
, carry_32
);
479 tcg_gen_add_tl(dst
, src1
, src2
);
480 tcg_gen_add_tl(dst
, dst
, carry
);
482 tcg_temp_free_i32(carry_32
);
483 #if TARGET_LONG_BITS == 64
484 tcg_temp_free(carry
);
489 tcg_gen_mov_tl(cpu_cc_src
, src1
);
490 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
491 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
492 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADDX
);
493 dc
->cc_op
= CC_OP_ADDX
;
497 static inline void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
499 tcg_gen_mov_tl(cpu_cc_src
, src1
);
500 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
501 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
502 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
505 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
506 TCGv src2
, int update_cc
)
514 /* Carry is known to be zero. Fall back to plain SUB. */
516 gen_op_sub_cc(dst
, src1
, src2
);
518 tcg_gen_sub_tl(dst
, src1
, src2
);
525 carry_32
= gen_add32_carry32();
531 if (TARGET_LONG_BITS
== 32) {
532 /* We can re-use the host's hardware carry generation by using
533 a SUB2 opcode. We discard the low part of the output.
534 Ideally we'd combine this operation with the add that
535 generated the carry in the first place. */
536 carry
= tcg_temp_new();
537 tcg_gen_sub2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
538 tcg_temp_free(carry
);
541 carry_32
= gen_sub32_carry32();
545 /* We need external help to produce the carry. */
546 carry_32
= tcg_temp_new_i32();
547 gen_helper_compute_C_icc(carry_32
, cpu_env
);
551 #if TARGET_LONG_BITS == 64
552 carry
= tcg_temp_new();
553 tcg_gen_extu_i32_i64(carry
, carry_32
);
558 tcg_gen_sub_tl(dst
, src1
, src2
);
559 tcg_gen_sub_tl(dst
, dst
, carry
);
561 tcg_temp_free_i32(carry_32
);
562 #if TARGET_LONG_BITS == 64
563 tcg_temp_free(carry
);
568 tcg_gen_mov_tl(cpu_cc_src
, src1
);
569 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
570 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
571 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
572 dc
->cc_op
= CC_OP_SUBX
;
576 static inline void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
578 TCGv r_temp
, zero
, t0
;
580 r_temp
= tcg_temp_new();
587 zero
= tcg_const_tl(0);
588 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
589 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
590 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
591 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
596 // env->y = (b2 << 31) | (env->y >> 1);
597 tcg_gen_andi_tl(r_temp
, cpu_cc_src
, 0x1);
598 tcg_gen_shli_tl(r_temp
, r_temp
, 31);
599 tcg_gen_shri_tl(t0
, cpu_y
, 1);
600 tcg_gen_andi_tl(t0
, t0
, 0x7fffffff);
601 tcg_gen_or_tl(t0
, t0
, r_temp
);
602 tcg_gen_andi_tl(cpu_y
, t0
, 0xffffffff);
605 gen_mov_reg_N(t0
, cpu_psr
);
606 gen_mov_reg_V(r_temp
, cpu_psr
);
607 tcg_gen_xor_tl(t0
, t0
, r_temp
);
608 tcg_temp_free(r_temp
);
610 // T0 = (b1 << 31) | (T0 >> 1);
612 tcg_gen_shli_tl(t0
, t0
, 31);
613 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
614 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
617 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
619 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
622 static inline void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
624 #if TARGET_LONG_BITS == 32
626 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
628 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
631 TCGv t0
= tcg_temp_new_i64();
632 TCGv t1
= tcg_temp_new_i64();
635 tcg_gen_ext32s_i64(t0
, src1
);
636 tcg_gen_ext32s_i64(t1
, src2
);
638 tcg_gen_ext32u_i64(t0
, src1
);
639 tcg_gen_ext32u_i64(t1
, src2
);
642 tcg_gen_mul_i64(dst
, t0
, t1
);
646 tcg_gen_shri_i64(cpu_y
, dst
, 32);
650 static inline void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
652 /* zero-extend truncated operands before multiplication */
653 gen_op_multiply(dst
, src1
, src2
, 0);
656 static inline void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
658 /* sign-extend truncated operands before multiplication */
659 gen_op_multiply(dst
, src1
, src2
, 1);
663 static inline void gen_op_eval_ba(TCGv dst
)
665 tcg_gen_movi_tl(dst
, 1);
669 static inline void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
671 gen_mov_reg_Z(dst
, src
);
675 static inline void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
677 TCGv t0
= tcg_temp_new();
678 gen_mov_reg_N(t0
, src
);
679 gen_mov_reg_V(dst
, src
);
680 tcg_gen_xor_tl(dst
, dst
, t0
);
681 gen_mov_reg_Z(t0
, src
);
682 tcg_gen_or_tl(dst
, dst
, t0
);
687 static inline void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
689 TCGv t0
= tcg_temp_new();
690 gen_mov_reg_V(t0
, src
);
691 gen_mov_reg_N(dst
, src
);
692 tcg_gen_xor_tl(dst
, dst
, t0
);
697 static inline void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
699 TCGv t0
= tcg_temp_new();
700 gen_mov_reg_Z(t0
, src
);
701 gen_mov_reg_C(dst
, src
);
702 tcg_gen_or_tl(dst
, dst
, t0
);
707 static inline void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
709 gen_mov_reg_C(dst
, src
);
713 static inline void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
715 gen_mov_reg_V(dst
, src
);
719 static inline void gen_op_eval_bn(TCGv dst
)
721 tcg_gen_movi_tl(dst
, 0);
725 static inline void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
727 gen_mov_reg_N(dst
, src
);
731 static inline void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
733 gen_mov_reg_Z(dst
, src
);
734 tcg_gen_xori_tl(dst
, dst
, 0x1);
738 static inline void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
740 gen_op_eval_ble(dst
, src
);
741 tcg_gen_xori_tl(dst
, dst
, 0x1);
745 static inline void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
747 gen_op_eval_bl(dst
, src
);
748 tcg_gen_xori_tl(dst
, dst
, 0x1);
752 static inline void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
754 gen_op_eval_bleu(dst
, src
);
755 tcg_gen_xori_tl(dst
, dst
, 0x1);
759 static inline void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
761 gen_mov_reg_C(dst
, src
);
762 tcg_gen_xori_tl(dst
, dst
, 0x1);
766 static inline void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
768 gen_mov_reg_N(dst
, src
);
769 tcg_gen_xori_tl(dst
, dst
, 0x1);
773 static inline void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
775 gen_mov_reg_V(dst
, src
);
776 tcg_gen_xori_tl(dst
, dst
, 0x1);
780 FPSR bit field FCC1 | FCC0:
786 static inline void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
787 unsigned int fcc_offset
)
789 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
790 tcg_gen_andi_tl(reg
, reg
, 0x1);
793 static inline void gen_mov_reg_FCC1(TCGv reg
, TCGv src
,
794 unsigned int fcc_offset
)
796 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
797 tcg_gen_andi_tl(reg
, reg
, 0x1);
801 static inline void gen_op_eval_fbne(TCGv dst
, TCGv src
,
802 unsigned int fcc_offset
)
804 TCGv t0
= tcg_temp_new();
805 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
806 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
807 tcg_gen_or_tl(dst
, dst
, t0
);
811 // 1 or 2: FCC0 ^ FCC1
812 static inline void gen_op_eval_fblg(TCGv dst
, TCGv src
,
813 unsigned int fcc_offset
)
815 TCGv t0
= tcg_temp_new();
816 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
817 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
818 tcg_gen_xor_tl(dst
, dst
, t0
);
823 static inline void gen_op_eval_fbul(TCGv dst
, TCGv src
,
824 unsigned int fcc_offset
)
826 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
830 static inline void gen_op_eval_fbl(TCGv dst
, TCGv src
,
831 unsigned int fcc_offset
)
833 TCGv t0
= tcg_temp_new();
834 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
835 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
836 tcg_gen_andc_tl(dst
, dst
, t0
);
841 static inline void gen_op_eval_fbug(TCGv dst
, TCGv src
,
842 unsigned int fcc_offset
)
844 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
848 static inline void gen_op_eval_fbg(TCGv dst
, TCGv src
,
849 unsigned int fcc_offset
)
851 TCGv t0
= tcg_temp_new();
852 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
853 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
854 tcg_gen_andc_tl(dst
, t0
, dst
);
859 static inline void gen_op_eval_fbu(TCGv dst
, TCGv src
,
860 unsigned int fcc_offset
)
862 TCGv t0
= tcg_temp_new();
863 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
864 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
865 tcg_gen_and_tl(dst
, dst
, t0
);
870 static inline void gen_op_eval_fbe(TCGv dst
, TCGv src
,
871 unsigned int fcc_offset
)
873 TCGv t0
= tcg_temp_new();
874 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
875 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
876 tcg_gen_or_tl(dst
, dst
, t0
);
877 tcg_gen_xori_tl(dst
, dst
, 0x1);
881 // 0 or 3: !(FCC0 ^ FCC1)
882 static inline void gen_op_eval_fbue(TCGv dst
, TCGv src
,
883 unsigned int fcc_offset
)
885 TCGv t0
= tcg_temp_new();
886 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
887 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
888 tcg_gen_xor_tl(dst
, dst
, t0
);
889 tcg_gen_xori_tl(dst
, dst
, 0x1);
894 static inline void gen_op_eval_fbge(TCGv dst
, TCGv src
,
895 unsigned int fcc_offset
)
897 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
898 tcg_gen_xori_tl(dst
, dst
, 0x1);
901 // !1: !(FCC0 & !FCC1)
902 static inline void gen_op_eval_fbuge(TCGv dst
, TCGv src
,
903 unsigned int fcc_offset
)
905 TCGv t0
= tcg_temp_new();
906 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
907 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
908 tcg_gen_andc_tl(dst
, dst
, t0
);
909 tcg_gen_xori_tl(dst
, dst
, 0x1);
914 static inline void gen_op_eval_fble(TCGv dst
, TCGv src
,
915 unsigned int fcc_offset
)
917 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
918 tcg_gen_xori_tl(dst
, dst
, 0x1);
921 // !2: !(!FCC0 & FCC1)
922 static inline void gen_op_eval_fbule(TCGv dst
, TCGv src
,
923 unsigned int fcc_offset
)
925 TCGv t0
= tcg_temp_new();
926 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
927 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
928 tcg_gen_andc_tl(dst
, t0
, dst
);
929 tcg_gen_xori_tl(dst
, dst
, 0x1);
933 // !3: !(FCC0 & FCC1)
934 static inline void gen_op_eval_fbo(TCGv dst
, TCGv src
,
935 unsigned int fcc_offset
)
937 TCGv t0
= tcg_temp_new();
938 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
939 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
940 tcg_gen_and_tl(dst
, dst
, t0
);
941 tcg_gen_xori_tl(dst
, dst
, 0x1);
945 static inline void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
946 target_ulong pc2
, TCGv r_cond
)
948 TCGLabel
*l1
= gen_new_label();
950 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
952 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
955 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
958 static inline void gen_branch_a(DisasContext
*dc
, target_ulong pc1
,
959 target_ulong pc2
, TCGv r_cond
)
961 TCGLabel
*l1
= gen_new_label();
963 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
965 gen_goto_tb(dc
, 0, pc2
, pc1
);
968 gen_goto_tb(dc
, 1, pc2
+ 4, pc2
+ 8);
971 static inline void gen_generic_branch(DisasContext
*dc
)
973 TCGv npc0
= tcg_const_tl(dc
->jump_pc
[0]);
974 TCGv npc1
= tcg_const_tl(dc
->jump_pc
[1]);
975 TCGv zero
= tcg_const_tl(0);
977 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
984 /* call this function before using the condition register as it may
985 have been set for a jump */
986 static inline void flush_cond(DisasContext
*dc
)
988 if (dc
->npc
== JUMP_PC
) {
989 gen_generic_branch(dc
);
990 dc
->npc
= DYNAMIC_PC
;
994 static inline void save_npc(DisasContext
*dc
)
996 if (dc
->npc
== JUMP_PC
) {
997 gen_generic_branch(dc
);
998 dc
->npc
= DYNAMIC_PC
;
999 } else if (dc
->npc
!= DYNAMIC_PC
) {
1000 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1004 static inline void update_psr(DisasContext
*dc
)
1006 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1007 dc
->cc_op
= CC_OP_FLAGS
;
1008 gen_helper_compute_psr(cpu_env
);
1012 static inline void save_state(DisasContext
*dc
)
1014 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1018 static inline void gen_mov_pc_npc(DisasContext
*dc
)
1020 if (dc
->npc
== JUMP_PC
) {
1021 gen_generic_branch(dc
);
1022 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1023 dc
->pc
= DYNAMIC_PC
;
1024 } else if (dc
->npc
== DYNAMIC_PC
) {
1025 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1026 dc
->pc
= DYNAMIC_PC
;
1032 static inline void gen_op_next_insn(void)
1034 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1035 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1038 static void free_compare(DisasCompare
*cmp
)
1041 tcg_temp_free(cmp
->c1
);
1044 tcg_temp_free(cmp
->c2
);
1048 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1051 static int subcc_cond
[16] = {
1067 -1, /* no overflow */
1070 static int logic_cond
[16] = {
1072 TCG_COND_EQ
, /* eq: Z */
1073 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1074 TCG_COND_LT
, /* lt: N ^ V -> N */
1075 TCG_COND_EQ
, /* leu: C | Z -> Z */
1076 TCG_COND_NEVER
, /* ltu: C -> 0 */
1077 TCG_COND_LT
, /* neg: N */
1078 TCG_COND_NEVER
, /* vs: V -> 0 */
1080 TCG_COND_NE
, /* ne: !Z */
1081 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1082 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1083 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1084 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1085 TCG_COND_GE
, /* pos: !N */
1086 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1092 #ifdef TARGET_SPARC64
1102 switch (dc
->cc_op
) {
1104 cmp
->cond
= logic_cond
[cond
];
1106 cmp
->is_bool
= false;
1108 cmp
->c2
= tcg_const_tl(0);
1109 #ifdef TARGET_SPARC64
1112 cmp
->c1
= tcg_temp_new();
1113 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1118 cmp
->c1
= cpu_cc_dst
;
1125 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1126 goto do_compare_dst_0
;
1128 case 7: /* overflow */
1129 case 15: /* !overflow */
1133 cmp
->cond
= subcc_cond
[cond
];
1134 cmp
->is_bool
= false;
1135 #ifdef TARGET_SPARC64
1137 /* Note that sign-extension works for unsigned compares as
1138 long as both operands are sign-extended. */
1139 cmp
->g1
= cmp
->g2
= false;
1140 cmp
->c1
= tcg_temp_new();
1141 cmp
->c2
= tcg_temp_new();
1142 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1143 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1147 cmp
->g1
= cmp
->g2
= true;
1148 cmp
->c1
= cpu_cc_src
;
1149 cmp
->c2
= cpu_cc_src2
;
1156 gen_helper_compute_psr(cpu_env
);
1157 dc
->cc_op
= CC_OP_FLAGS
;
1161 /* We're going to generate a boolean result. */
1162 cmp
->cond
= TCG_COND_NE
;
1163 cmp
->is_bool
= true;
1164 cmp
->g1
= cmp
->g2
= false;
1165 cmp
->c1
= r_dst
= tcg_temp_new();
1166 cmp
->c2
= tcg_const_tl(0);
1170 gen_op_eval_bn(r_dst
);
1173 gen_op_eval_be(r_dst
, r_src
);
1176 gen_op_eval_ble(r_dst
, r_src
);
1179 gen_op_eval_bl(r_dst
, r_src
);
1182 gen_op_eval_bleu(r_dst
, r_src
);
1185 gen_op_eval_bcs(r_dst
, r_src
);
1188 gen_op_eval_bneg(r_dst
, r_src
);
1191 gen_op_eval_bvs(r_dst
, r_src
);
1194 gen_op_eval_ba(r_dst
);
1197 gen_op_eval_bne(r_dst
, r_src
);
1200 gen_op_eval_bg(r_dst
, r_src
);
1203 gen_op_eval_bge(r_dst
, r_src
);
1206 gen_op_eval_bgu(r_dst
, r_src
);
1209 gen_op_eval_bcc(r_dst
, r_src
);
1212 gen_op_eval_bpos(r_dst
, r_src
);
1215 gen_op_eval_bvc(r_dst
, r_src
);
1222 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1224 unsigned int offset
;
1227 /* For now we still generate a straight boolean result. */
1228 cmp
->cond
= TCG_COND_NE
;
1229 cmp
->is_bool
= true;
1230 cmp
->g1
= cmp
->g2
= false;
1231 cmp
->c1
= r_dst
= tcg_temp_new();
1232 cmp
->c2
= tcg_const_tl(0);
1252 gen_op_eval_bn(r_dst
);
1255 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1258 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1261 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1264 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1267 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1270 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1273 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1276 gen_op_eval_ba(r_dst
);
1279 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1282 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1285 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1288 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1291 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1294 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1297 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1302 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1306 gen_compare(&cmp
, cc
, cond
, dc
);
1308 /* The interface is to return a boolean in r_dst. */
1310 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1312 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1318 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1321 gen_fcompare(&cmp
, cc
, cond
);
1323 /* The interface is to return a boolean in r_dst. */
1325 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1327 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1333 #ifdef TARGET_SPARC64
1335 static const int gen_tcg_cond_reg
[8] = {
1346 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1348 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1349 cmp
->is_bool
= false;
1353 cmp
->c2
= tcg_const_tl(0);
1356 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1359 gen_compare_reg(&cmp
, cond
, r_src
);
1361 /* The interface is to return a boolean in r_dst. */
1362 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1368 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1370 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1371 target_ulong target
= dc
->pc
+ offset
;
1373 #ifdef TARGET_SPARC64
1374 if (unlikely(AM_CHECK(dc
))) {
1375 target
&= 0xffffffffULL
;
1379 /* unconditional not taken */
1381 dc
->pc
= dc
->npc
+ 4;
1382 dc
->npc
= dc
->pc
+ 4;
1385 dc
->npc
= dc
->pc
+ 4;
1387 } else if (cond
== 0x8) {
1388 /* unconditional taken */
1391 dc
->npc
= dc
->pc
+ 4;
1395 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1399 gen_cond(cpu_cond
, cc
, cond
, dc
);
1401 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1405 dc
->jump_pc
[0] = target
;
1406 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1407 dc
->jump_pc
[1] = DYNAMIC_PC
;
1408 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1410 dc
->jump_pc
[1] = dc
->npc
+ 4;
1417 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1419 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1420 target_ulong target
= dc
->pc
+ offset
;
1422 #ifdef TARGET_SPARC64
1423 if (unlikely(AM_CHECK(dc
))) {
1424 target
&= 0xffffffffULL
;
1428 /* unconditional not taken */
1430 dc
->pc
= dc
->npc
+ 4;
1431 dc
->npc
= dc
->pc
+ 4;
1434 dc
->npc
= dc
->pc
+ 4;
1436 } else if (cond
== 0x8) {
1437 /* unconditional taken */
1440 dc
->npc
= dc
->pc
+ 4;
1444 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1448 gen_fcond(cpu_cond
, cc
, cond
);
1450 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1454 dc
->jump_pc
[0] = target
;
1455 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1456 dc
->jump_pc
[1] = DYNAMIC_PC
;
1457 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1459 dc
->jump_pc
[1] = dc
->npc
+ 4;
1466 #ifdef TARGET_SPARC64
1467 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1470 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1471 target_ulong target
= dc
->pc
+ offset
;
1473 if (unlikely(AM_CHECK(dc
))) {
1474 target
&= 0xffffffffULL
;
1477 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1479 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1483 dc
->jump_pc
[0] = target
;
1484 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1485 dc
->jump_pc
[1] = DYNAMIC_PC
;
1486 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1488 dc
->jump_pc
[1] = dc
->npc
+ 4;
1494 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1498 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1501 gen_helper_fcmps_fcc1(cpu_env
, r_rs1
, r_rs2
);
1504 gen_helper_fcmps_fcc2(cpu_env
, r_rs1
, r_rs2
);
1507 gen_helper_fcmps_fcc3(cpu_env
, r_rs1
, r_rs2
);
1512 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1516 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1519 gen_helper_fcmpd_fcc1(cpu_env
, r_rs1
, r_rs2
);
1522 gen_helper_fcmpd_fcc2(cpu_env
, r_rs1
, r_rs2
);
1525 gen_helper_fcmpd_fcc3(cpu_env
, r_rs1
, r_rs2
);
1530 static inline void gen_op_fcmpq(int fccno
)
1534 gen_helper_fcmpq(cpu_env
);
1537 gen_helper_fcmpq_fcc1(cpu_env
);
1540 gen_helper_fcmpq_fcc2(cpu_env
);
1543 gen_helper_fcmpq_fcc3(cpu_env
);
1548 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1552 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1555 gen_helper_fcmpes_fcc1(cpu_env
, r_rs1
, r_rs2
);
1558 gen_helper_fcmpes_fcc2(cpu_env
, r_rs1
, r_rs2
);
1561 gen_helper_fcmpes_fcc3(cpu_env
, r_rs1
, r_rs2
);
1566 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1570 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1573 gen_helper_fcmped_fcc1(cpu_env
, r_rs1
, r_rs2
);
1576 gen_helper_fcmped_fcc2(cpu_env
, r_rs1
, r_rs2
);
1579 gen_helper_fcmped_fcc3(cpu_env
, r_rs1
, r_rs2
);
1584 static inline void gen_op_fcmpeq(int fccno
)
1588 gen_helper_fcmpeq(cpu_env
);
1591 gen_helper_fcmpeq_fcc1(cpu_env
);
1594 gen_helper_fcmpeq_fcc2(cpu_env
);
1597 gen_helper_fcmpeq_fcc3(cpu_env
);
1604 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1606 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1609 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1611 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1614 static inline void gen_op_fcmpq(int fccno
)
1616 gen_helper_fcmpq(cpu_env
);
1619 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1621 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1624 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1626 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1629 static inline void gen_op_fcmpeq(int fccno
)
1631 gen_helper_fcmpeq(cpu_env
);
1635 static inline void gen_op_fpexception_im(int fsr_flags
)
1639 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1640 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1641 r_const
= tcg_const_i32(TT_FP_EXCP
);
1642 gen_helper_raise_exception(cpu_env
, r_const
);
1643 tcg_temp_free_i32(r_const
);
1646 static int gen_trap_ifnofpu(DisasContext
*dc
)
1648 #if !defined(CONFIG_USER_ONLY)
1649 if (!dc
->fpu_enabled
) {
1653 r_const
= tcg_const_i32(TT_NFPU_INSN
);
1654 gen_helper_raise_exception(cpu_env
, r_const
);
1655 tcg_temp_free_i32(r_const
);
1663 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1665 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1668 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1669 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1673 src
= gen_load_fpr_F(dc
, rs
);
1674 dst
= gen_dest_fpr_F(dc
);
1676 gen(dst
, cpu_env
, src
);
1678 gen_store_fpr_F(dc
, rd
, dst
);
1681 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1682 void (*gen
)(TCGv_i32
, TCGv_i32
))
1686 src
= gen_load_fpr_F(dc
, rs
);
1687 dst
= gen_dest_fpr_F(dc
);
1691 gen_store_fpr_F(dc
, rd
, dst
);
1694 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1695 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1697 TCGv_i32 dst
, src1
, src2
;
1699 src1
= gen_load_fpr_F(dc
, rs1
);
1700 src2
= gen_load_fpr_F(dc
, rs2
);
1701 dst
= gen_dest_fpr_F(dc
);
1703 gen(dst
, cpu_env
, src1
, src2
);
1705 gen_store_fpr_F(dc
, rd
, dst
);
1708 #ifdef TARGET_SPARC64
1709 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1710 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1712 TCGv_i32 dst
, src1
, src2
;
1714 src1
= gen_load_fpr_F(dc
, rs1
);
1715 src2
= gen_load_fpr_F(dc
, rs2
);
1716 dst
= gen_dest_fpr_F(dc
);
1718 gen(dst
, src1
, src2
);
1720 gen_store_fpr_F(dc
, rd
, dst
);
1724 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1725 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1729 src
= gen_load_fpr_D(dc
, rs
);
1730 dst
= gen_dest_fpr_D(dc
, rd
);
1732 gen(dst
, cpu_env
, src
);
1734 gen_store_fpr_D(dc
, rd
, dst
);
1737 #ifdef TARGET_SPARC64
1738 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1739 void (*gen
)(TCGv_i64
, TCGv_i64
))
1743 src
= gen_load_fpr_D(dc
, rs
);
1744 dst
= gen_dest_fpr_D(dc
, rd
);
1748 gen_store_fpr_D(dc
, rd
, dst
);
1752 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1753 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1755 TCGv_i64 dst
, src1
, src2
;
1757 src1
= gen_load_fpr_D(dc
, rs1
);
1758 src2
= gen_load_fpr_D(dc
, rs2
);
1759 dst
= gen_dest_fpr_D(dc
, rd
);
1761 gen(dst
, cpu_env
, src1
, src2
);
1763 gen_store_fpr_D(dc
, rd
, dst
);
1766 #ifdef TARGET_SPARC64
1767 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1768 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1770 TCGv_i64 dst
, src1
, src2
;
1772 src1
= gen_load_fpr_D(dc
, rs1
);
1773 src2
= gen_load_fpr_D(dc
, rs2
);
1774 dst
= gen_dest_fpr_D(dc
, rd
);
1776 gen(dst
, src1
, src2
);
1778 gen_store_fpr_D(dc
, rd
, dst
);
1781 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1782 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1784 TCGv_i64 dst
, src1
, src2
;
1786 src1
= gen_load_fpr_D(dc
, rs1
);
1787 src2
= gen_load_fpr_D(dc
, rs2
);
1788 dst
= gen_dest_fpr_D(dc
, rd
);
1790 gen(dst
, cpu_gsr
, src1
, src2
);
1792 gen_store_fpr_D(dc
, rd
, dst
);
1795 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1796 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1798 TCGv_i64 dst
, src0
, src1
, src2
;
1800 src1
= gen_load_fpr_D(dc
, rs1
);
1801 src2
= gen_load_fpr_D(dc
, rs2
);
1802 src0
= gen_load_fpr_D(dc
, rd
);
1803 dst
= gen_dest_fpr_D(dc
, rd
);
1805 gen(dst
, src0
, src1
, src2
);
1807 gen_store_fpr_D(dc
, rd
, dst
);
1811 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1812 void (*gen
)(TCGv_ptr
))
1814 gen_op_load_fpr_QT1(QFPREG(rs
));
1818 gen_op_store_QT0_fpr(QFPREG(rd
));
1819 gen_update_fprs_dirty(QFPREG(rd
));
1822 #ifdef TARGET_SPARC64
1823 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1824 void (*gen
)(TCGv_ptr
))
1826 gen_op_load_fpr_QT1(QFPREG(rs
));
1830 gen_op_store_QT0_fpr(QFPREG(rd
));
1831 gen_update_fprs_dirty(QFPREG(rd
));
1835 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1836 void (*gen
)(TCGv_ptr
))
1838 gen_op_load_fpr_QT0(QFPREG(rs1
));
1839 gen_op_load_fpr_QT1(QFPREG(rs2
));
1843 gen_op_store_QT0_fpr(QFPREG(rd
));
1844 gen_update_fprs_dirty(QFPREG(rd
));
1847 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1848 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1851 TCGv_i32 src1
, src2
;
1853 src1
= gen_load_fpr_F(dc
, rs1
);
1854 src2
= gen_load_fpr_F(dc
, rs2
);
1855 dst
= gen_dest_fpr_D(dc
, rd
);
1857 gen(dst
, cpu_env
, src1
, src2
);
1859 gen_store_fpr_D(dc
, rd
, dst
);
1862 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1863 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1865 TCGv_i64 src1
, src2
;
1867 src1
= gen_load_fpr_D(dc
, rs1
);
1868 src2
= gen_load_fpr_D(dc
, rs2
);
1870 gen(cpu_env
, src1
, src2
);
1872 gen_op_store_QT0_fpr(QFPREG(rd
));
1873 gen_update_fprs_dirty(QFPREG(rd
));
1876 #ifdef TARGET_SPARC64
1877 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1878 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1883 src
= gen_load_fpr_F(dc
, rs
);
1884 dst
= gen_dest_fpr_D(dc
, rd
);
1886 gen(dst
, cpu_env
, src
);
1888 gen_store_fpr_D(dc
, rd
, dst
);
1892 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1893 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1898 src
= gen_load_fpr_F(dc
, rs
);
1899 dst
= gen_dest_fpr_D(dc
, rd
);
1901 gen(dst
, cpu_env
, src
);
1903 gen_store_fpr_D(dc
, rd
, dst
);
1906 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1907 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1912 src
= gen_load_fpr_D(dc
, rs
);
1913 dst
= gen_dest_fpr_F(dc
);
1915 gen(dst
, cpu_env
, src
);
1917 gen_store_fpr_F(dc
, rd
, dst
);
1920 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1921 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1925 gen_op_load_fpr_QT1(QFPREG(rs
));
1926 dst
= gen_dest_fpr_F(dc
);
1930 gen_store_fpr_F(dc
, rd
, dst
);
1933 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1934 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1938 gen_op_load_fpr_QT1(QFPREG(rs
));
1939 dst
= gen_dest_fpr_D(dc
, rd
);
1943 gen_store_fpr_D(dc
, rd
, dst
);
1946 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1947 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1951 src
= gen_load_fpr_F(dc
, rs
);
1955 gen_op_store_QT0_fpr(QFPREG(rd
));
1956 gen_update_fprs_dirty(QFPREG(rd
));
1959 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1960 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1964 src
= gen_load_fpr_D(dc
, rs
);
1968 gen_op_store_QT0_fpr(QFPREG(rd
));
1969 gen_update_fprs_dirty(QFPREG(rd
));
1973 #ifdef TARGET_SPARC64
1974 static inline TCGv_i32
gen_get_asi(int insn
, TCGv r_addr
)
1980 r_asi
= tcg_temp_new_i32();
1981 tcg_gen_mov_i32(r_asi
, cpu_asi
);
1983 asi
= GET_FIELD(insn
, 19, 26);
1984 r_asi
= tcg_const_i32(asi
);
1989 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
1992 TCGv_i32 r_asi
, r_size
, r_sign
;
1994 r_asi
= gen_get_asi(insn
, addr
);
1995 r_size
= tcg_const_i32(size
);
1996 r_sign
= tcg_const_i32(sign
);
1997 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
1998 tcg_temp_free_i32(r_sign
);
1999 tcg_temp_free_i32(r_size
);
2000 tcg_temp_free_i32(r_asi
);
2003 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2005 TCGv_i32 r_asi
, r_size
;
2007 r_asi
= gen_get_asi(insn
, addr
);
2008 r_size
= tcg_const_i32(size
);
2009 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2010 tcg_temp_free_i32(r_size
);
2011 tcg_temp_free_i32(r_asi
);
2014 static inline void gen_ldf_asi(TCGv addr
, int insn
, int size
, int rd
)
2016 TCGv_i32 r_asi
, r_size
, r_rd
;
2018 r_asi
= gen_get_asi(insn
, addr
);
2019 r_size
= tcg_const_i32(size
);
2020 r_rd
= tcg_const_i32(rd
);
2021 gen_helper_ldf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2022 tcg_temp_free_i32(r_rd
);
2023 tcg_temp_free_i32(r_size
);
2024 tcg_temp_free_i32(r_asi
);
2027 static inline void gen_stf_asi(TCGv addr
, int insn
, int size
, int rd
)
2029 TCGv_i32 r_asi
, r_size
, r_rd
;
2031 r_asi
= gen_get_asi(insn
, addr
);
2032 r_size
= tcg_const_i32(size
);
2033 r_rd
= tcg_const_i32(rd
);
2034 gen_helper_stf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2035 tcg_temp_free_i32(r_rd
);
2036 tcg_temp_free_i32(r_size
);
2037 tcg_temp_free_i32(r_asi
);
2040 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2042 TCGv_i32 r_asi
, r_size
, r_sign
;
2043 TCGv_i64 t64
= tcg_temp_new_i64();
2045 r_asi
= gen_get_asi(insn
, addr
);
2046 r_size
= tcg_const_i32(4);
2047 r_sign
= tcg_const_i32(0);
2048 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2049 tcg_temp_free_i32(r_sign
);
2050 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2051 tcg_temp_free_i32(r_size
);
2052 tcg_temp_free_i32(r_asi
);
2053 tcg_gen_trunc_i64_tl(dst
, t64
);
2054 tcg_temp_free_i64(t64
);
2057 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2060 TCGv_i32 r_asi
, r_rd
;
2062 r_asi
= gen_get_asi(insn
, addr
);
2063 r_rd
= tcg_const_i32(rd
);
2064 gen_helper_ldda_asi(cpu_env
, addr
, r_asi
, r_rd
);
2065 tcg_temp_free_i32(r_rd
);
2066 tcg_temp_free_i32(r_asi
);
2069 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2072 TCGv_i32 r_asi
, r_size
;
2073 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2074 TCGv_i64 t64
= tcg_temp_new_i64();
2076 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2077 r_asi
= gen_get_asi(insn
, addr
);
2078 r_size
= tcg_const_i32(8);
2079 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2080 tcg_temp_free_i32(r_size
);
2081 tcg_temp_free_i32(r_asi
);
2082 tcg_temp_free_i64(t64
);
2085 static inline void gen_casx_asi(DisasContext
*dc
, TCGv addr
,
2086 TCGv val2
, int insn
, int rd
)
2088 TCGv val1
= gen_load_gpr(dc
, rd
);
2089 TCGv dst
= gen_dest_gpr(dc
, rd
);
2090 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2092 gen_helper_casx_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2093 tcg_temp_free_i32(r_asi
);
2094 gen_store_gpr(dc
, rd
, dst
);
2097 #elif !defined(CONFIG_USER_ONLY)
2099 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
2102 TCGv_i32 r_asi
, r_size
, r_sign
;
2103 TCGv_i64 t64
= tcg_temp_new_i64();
2105 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2106 r_size
= tcg_const_i32(size
);
2107 r_sign
= tcg_const_i32(sign
);
2108 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2109 tcg_temp_free_i32(r_sign
);
2110 tcg_temp_free_i32(r_size
);
2111 tcg_temp_free_i32(r_asi
);
2112 tcg_gen_trunc_i64_tl(dst
, t64
);
2113 tcg_temp_free_i64(t64
);
2116 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2118 TCGv_i32 r_asi
, r_size
;
2119 TCGv_i64 t64
= tcg_temp_new_i64();
2121 tcg_gen_extu_tl_i64(t64
, src
);
2122 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2123 r_size
= tcg_const_i32(size
);
2124 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2125 tcg_temp_free_i32(r_size
);
2126 tcg_temp_free_i32(r_asi
);
2127 tcg_temp_free_i64(t64
);
2130 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2132 TCGv_i32 r_asi
, r_size
, r_sign
;
2133 TCGv_i64 r_val
, t64
;
2135 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2136 r_size
= tcg_const_i32(4);
2137 r_sign
= tcg_const_i32(0);
2138 t64
= tcg_temp_new_i64();
2139 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2140 tcg_temp_free(r_sign
);
2141 r_val
= tcg_temp_new_i64();
2142 tcg_gen_extu_tl_i64(r_val
, src
);
2143 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2144 tcg_temp_free_i64(r_val
);
2145 tcg_temp_free_i32(r_size
);
2146 tcg_temp_free_i32(r_asi
);
2147 tcg_gen_trunc_i64_tl(dst
, t64
);
2148 tcg_temp_free_i64(t64
);
2151 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2154 TCGv_i32 r_asi
, r_size
, r_sign
;
2158 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2159 r_size
= tcg_const_i32(8);
2160 r_sign
= tcg_const_i32(0);
2161 t64
= tcg_temp_new_i64();
2162 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2163 tcg_temp_free_i32(r_sign
);
2164 tcg_temp_free_i32(r_size
);
2165 tcg_temp_free_i32(r_asi
);
2167 t
= gen_dest_gpr(dc
, rd
+ 1);
2168 tcg_gen_trunc_i64_tl(t
, t64
);
2169 gen_store_gpr(dc
, rd
+ 1, t
);
2171 tcg_gen_shri_i64(t64
, t64
, 32);
2172 tcg_gen_trunc_i64_tl(hi
, t64
);
2173 tcg_temp_free_i64(t64
);
2174 gen_store_gpr(dc
, rd
, hi
);
2177 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2180 TCGv_i32 r_asi
, r_size
;
2181 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2182 TCGv_i64 t64
= tcg_temp_new_i64();
2184 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2185 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2186 r_size
= tcg_const_i32(8);
2187 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2188 tcg_temp_free_i32(r_size
);
2189 tcg_temp_free_i32(r_asi
);
2190 tcg_temp_free_i64(t64
);
2194 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2195 static inline void gen_cas_asi(DisasContext
*dc
, TCGv addr
,
2196 TCGv val2
, int insn
, int rd
)
2198 TCGv val1
= gen_load_gpr(dc
, rd
);
2199 TCGv dst
= gen_dest_gpr(dc
, rd
);
2200 #ifdef TARGET_SPARC64
2201 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2203 TCGv_i32 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2206 gen_helper_cas_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2207 tcg_temp_free_i32(r_asi
);
2208 gen_store_gpr(dc
, rd
, dst
);
2211 static inline void gen_ldstub_asi(TCGv dst
, TCGv addr
, int insn
)
2214 TCGv_i32 r_asi
, r_size
;
2216 gen_ld_asi(dst
, addr
, insn
, 1, 0);
2218 r_val
= tcg_const_i64(0xffULL
);
2219 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2220 r_size
= tcg_const_i32(1);
2221 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2222 tcg_temp_free_i32(r_size
);
2223 tcg_temp_free_i32(r_asi
);
2224 tcg_temp_free_i64(r_val
);
2228 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2230 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2231 return gen_load_gpr(dc
, rs1
);
2234 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2236 if (IS_IMM
) { /* immediate */
2237 target_long simm
= GET_FIELDs(insn
, 19, 31);
2238 TCGv t
= get_temp_tl(dc
);
2239 tcg_gen_movi_tl(t
, simm
);
2241 } else { /* register */
2242 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2243 return gen_load_gpr(dc
, rs2
);
2247 #ifdef TARGET_SPARC64
2248 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2250 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2252 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2253 or fold the comparison down to 32 bits and use movcond_i32. Choose
2255 c32
= tcg_temp_new_i32();
2257 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2259 TCGv_i64 c64
= tcg_temp_new_i64();
2260 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2261 tcg_gen_extrl_i64_i32(c32
, c64
);
2262 tcg_temp_free_i64(c64
);
2265 s1
= gen_load_fpr_F(dc
, rs
);
2266 s2
= gen_load_fpr_F(dc
, rd
);
2267 dst
= gen_dest_fpr_F(dc
);
2268 zero
= tcg_const_i32(0);
2270 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2272 tcg_temp_free_i32(c32
);
2273 tcg_temp_free_i32(zero
);
2274 gen_store_fpr_F(dc
, rd
, dst
);
2277 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2279 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2280 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2281 gen_load_fpr_D(dc
, rs
),
2282 gen_load_fpr_D(dc
, rd
));
2283 gen_store_fpr_D(dc
, rd
, dst
);
2286 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2288 int qd
= QFPREG(rd
);
2289 int qs
= QFPREG(rs
);
2291 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2292 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2293 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2294 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2296 gen_update_fprs_dirty(qd
);
2299 #ifndef CONFIG_USER_ONLY
2300 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_ptr cpu_env
)
2302 TCGv_i32 r_tl
= tcg_temp_new_i32();
2304 /* load env->tl into r_tl */
2305 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2307 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2308 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2310 /* calculate offset to current trap state from env->ts, reuse r_tl */
2311 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2312 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2314 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2316 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2317 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2318 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2319 tcg_temp_free_ptr(r_tl_tmp
);
2322 tcg_temp_free_i32(r_tl
);
2326 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2327 int width
, bool cc
, bool left
)
2329 TCGv lo1
, lo2
, t1
, t2
;
2330 uint64_t amask
, tabl
, tabr
;
2331 int shift
, imask
, omask
;
2334 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2335 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2336 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2337 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2338 dc
->cc_op
= CC_OP_SUB
;
2341 /* Theory of operation: there are two tables, left and right (not to
2342 be confused with the left and right versions of the opcode). These
2343 are indexed by the low 3 bits of the inputs. To make things "easy",
2344 these tables are loaded into two constants, TABL and TABR below.
2345 The operation index = (input & imask) << shift calculates the index
2346 into the constant, while val = (table >> index) & omask calculates
2347 the value we're looking for. */
2354 tabl
= 0x80c0e0f0f8fcfeffULL
;
2355 tabr
= 0xff7f3f1f0f070301ULL
;
2357 tabl
= 0x0103070f1f3f7fffULL
;
2358 tabr
= 0xfffefcf8f0e0c080ULL
;
2378 tabl
= (2 << 2) | 3;
2379 tabr
= (3 << 2) | 1;
2381 tabl
= (1 << 2) | 3;
2382 tabr
= (3 << 2) | 2;
2389 lo1
= tcg_temp_new();
2390 lo2
= tcg_temp_new();
2391 tcg_gen_andi_tl(lo1
, s1
, imask
);
2392 tcg_gen_andi_tl(lo2
, s2
, imask
);
2393 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2394 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2396 t1
= tcg_const_tl(tabl
);
2397 t2
= tcg_const_tl(tabr
);
2398 tcg_gen_shr_tl(lo1
, t1
, lo1
);
2399 tcg_gen_shr_tl(lo2
, t2
, lo2
);
2400 tcg_gen_andi_tl(dst
, lo1
, omask
);
2401 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2405 amask
&= 0xffffffffULL
;
2407 tcg_gen_andi_tl(s1
, s1
, amask
);
2408 tcg_gen_andi_tl(s2
, s2
, amask
);
2410 /* We want to compute
2411 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2412 We've already done dst = lo1, so this reduces to
2413 dst &= (s1 == s2 ? -1 : lo2)
2418 tcg_gen_setcond_tl(TCG_COND_EQ
, t1
, s1
, s2
);
2419 tcg_gen_neg_tl(t1
, t1
);
2420 tcg_gen_or_tl(lo2
, lo2
, t1
);
2421 tcg_gen_and_tl(dst
, dst
, lo2
);
2429 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2431 TCGv tmp
= tcg_temp_new();
2433 tcg_gen_add_tl(tmp
, s1
, s2
);
2434 tcg_gen_andi_tl(dst
, tmp
, -8);
2436 tcg_gen_neg_tl(tmp
, tmp
);
2438 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2443 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2447 t1
= tcg_temp_new();
2448 t2
= tcg_temp_new();
2449 shift
= tcg_temp_new();
2451 tcg_gen_andi_tl(shift
, gsr
, 7);
2452 tcg_gen_shli_tl(shift
, shift
, 3);
2453 tcg_gen_shl_tl(t1
, s1
, shift
);
2455 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2456 shift of (up to 63) followed by a constant shift of 1. */
2457 tcg_gen_xori_tl(shift
, shift
, 63);
2458 tcg_gen_shr_tl(t2
, s2
, shift
);
2459 tcg_gen_shri_tl(t2
, t2
, 1);
2461 tcg_gen_or_tl(dst
, t1
, t2
);
2465 tcg_temp_free(shift
);
2469 #define CHECK_IU_FEATURE(dc, FEATURE) \
2470 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2472 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2473 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2476 /* before an instruction, dc->pc must be static */
2477 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2479 unsigned int opc
, rs1
, rs2
, rd
;
2480 TCGv cpu_src1
, cpu_src2
;
2481 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2482 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2485 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2486 tcg_gen_debug_insn_start(dc
->pc
);
2489 opc
= GET_FIELD(insn
, 0, 1);
2490 rd
= GET_FIELD(insn
, 2, 6);
2493 case 0: /* branches/sethi */
2495 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2498 #ifdef TARGET_SPARC64
2499 case 0x1: /* V9 BPcc */
2503 target
= GET_FIELD_SP(insn
, 0, 18);
2504 target
= sign_extend(target
, 19);
2506 cc
= GET_FIELD_SP(insn
, 20, 21);
2508 do_branch(dc
, target
, insn
, 0);
2510 do_branch(dc
, target
, insn
, 1);
2515 case 0x3: /* V9 BPr */
2517 target
= GET_FIELD_SP(insn
, 0, 13) |
2518 (GET_FIELD_SP(insn
, 20, 21) << 14);
2519 target
= sign_extend(target
, 16);
2521 cpu_src1
= get_src1(dc
, insn
);
2522 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2525 case 0x5: /* V9 FBPcc */
2527 int cc
= GET_FIELD_SP(insn
, 20, 21);
2528 if (gen_trap_ifnofpu(dc
)) {
2531 target
= GET_FIELD_SP(insn
, 0, 18);
2532 target
= sign_extend(target
, 19);
2534 do_fbranch(dc
, target
, insn
, cc
);
2538 case 0x7: /* CBN+x */
2543 case 0x2: /* BN+x */
2545 target
= GET_FIELD(insn
, 10, 31);
2546 target
= sign_extend(target
, 22);
2548 do_branch(dc
, target
, insn
, 0);
2551 case 0x6: /* FBN+x */
2553 if (gen_trap_ifnofpu(dc
)) {
2556 target
= GET_FIELD(insn
, 10, 31);
2557 target
= sign_extend(target
, 22);
2559 do_fbranch(dc
, target
, insn
, 0);
2562 case 0x4: /* SETHI */
2563 /* Special-case %g0 because that's the canonical nop. */
2565 uint32_t value
= GET_FIELD(insn
, 10, 31);
2566 TCGv t
= gen_dest_gpr(dc
, rd
);
2567 tcg_gen_movi_tl(t
, value
<< 10);
2568 gen_store_gpr(dc
, rd
, t
);
2571 case 0x0: /* UNIMPL */
2580 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2581 TCGv o7
= gen_dest_gpr(dc
, 15);
2583 tcg_gen_movi_tl(o7
, dc
->pc
);
2584 gen_store_gpr(dc
, 15, o7
);
2587 #ifdef TARGET_SPARC64
2588 if (unlikely(AM_CHECK(dc
))) {
2589 target
&= 0xffffffffULL
;
2595 case 2: /* FPU & Logical Operations */
2597 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2598 TCGv cpu_dst
= get_temp_tl(dc
);
2601 if (xop
== 0x3a) { /* generate trap */
2602 int cond
= GET_FIELD(insn
, 3, 6);
2604 TCGLabel
*l1
= NULL
;
2615 /* Conditional trap. */
2617 #ifdef TARGET_SPARC64
2619 int cc
= GET_FIELD_SP(insn
, 11, 12);
2621 gen_compare(&cmp
, 0, cond
, dc
);
2622 } else if (cc
== 2) {
2623 gen_compare(&cmp
, 1, cond
, dc
);
2628 gen_compare(&cmp
, 0, cond
, dc
);
2630 l1
= gen_new_label();
2631 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2632 cmp
.c1
, cmp
.c2
, l1
);
2636 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2637 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2639 /* Don't use the normal temporaries, as they may well have
2640 gone out of scope with the branch above. While we're
2641 doing that we might as well pre-truncate to 32-bit. */
2642 trap
= tcg_temp_new_i32();
2644 rs1
= GET_FIELD_SP(insn
, 14, 18);
2646 rs2
= GET_FIELD_SP(insn
, 0, 6);
2648 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2649 /* Signal that the trap value is fully constant. */
2652 TCGv t1
= gen_load_gpr(dc
, rs1
);
2653 tcg_gen_trunc_tl_i32(trap
, t1
);
2654 tcg_gen_addi_i32(trap
, trap
, rs2
);
2658 rs2
= GET_FIELD_SP(insn
, 0, 4);
2659 t1
= gen_load_gpr(dc
, rs1
);
2660 t2
= gen_load_gpr(dc
, rs2
);
2661 tcg_gen_add_tl(t1
, t1
, t2
);
2662 tcg_gen_trunc_tl_i32(trap
, t1
);
2665 tcg_gen_andi_i32(trap
, trap
, mask
);
2666 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2669 gen_helper_raise_exception(cpu_env
, trap
);
2670 tcg_temp_free_i32(trap
);
2673 /* An unconditional trap ends the TB. */
2677 /* A conditional trap falls through to the next insn. */
2681 } else if (xop
== 0x28) {
2682 rs1
= GET_FIELD(insn
, 13, 17);
2685 #ifndef TARGET_SPARC64
2686 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2687 manual, rdy on the microSPARC
2689 case 0x0f: /* stbar in the SPARCv8 manual,
2690 rdy on the microSPARC II */
2691 case 0x10 ... 0x1f: /* implementation-dependent in the
2692 SPARCv8 manual, rdy on the
2695 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
2696 TCGv t
= gen_dest_gpr(dc
, rd
);
2697 /* Read Asr17 for a Leon3 monoprocessor */
2698 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
2699 gen_store_gpr(dc
, rd
, t
);
2703 gen_store_gpr(dc
, rd
, cpu_y
);
2705 #ifdef TARGET_SPARC64
2706 case 0x2: /* V9 rdccr */
2708 gen_helper_rdccr(cpu_dst
, cpu_env
);
2709 gen_store_gpr(dc
, rd
, cpu_dst
);
2711 case 0x3: /* V9 rdasi */
2712 tcg_gen_ext_i32_tl(cpu_dst
, cpu_asi
);
2713 gen_store_gpr(dc
, rd
, cpu_dst
);
2715 case 0x4: /* V9 rdtick */
2719 r_tickptr
= tcg_temp_new_ptr();
2720 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2721 offsetof(CPUSPARCState
, tick
));
2722 gen_helper_tick_get_count(cpu_dst
, r_tickptr
);
2723 tcg_temp_free_ptr(r_tickptr
);
2724 gen_store_gpr(dc
, rd
, cpu_dst
);
2727 case 0x5: /* V9 rdpc */
2729 TCGv t
= gen_dest_gpr(dc
, rd
);
2730 if (unlikely(AM_CHECK(dc
))) {
2731 tcg_gen_movi_tl(t
, dc
->pc
& 0xffffffffULL
);
2733 tcg_gen_movi_tl(t
, dc
->pc
);
2735 gen_store_gpr(dc
, rd
, t
);
2738 case 0x6: /* V9 rdfprs */
2739 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
2740 gen_store_gpr(dc
, rd
, cpu_dst
);
2742 case 0xf: /* V9 membar */
2743 break; /* no effect */
2744 case 0x13: /* Graphics Status */
2745 if (gen_trap_ifnofpu(dc
)) {
2748 gen_store_gpr(dc
, rd
, cpu_gsr
);
2750 case 0x16: /* Softint */
2751 tcg_gen_ext_i32_tl(cpu_dst
, cpu_softint
);
2752 gen_store_gpr(dc
, rd
, cpu_dst
);
2754 case 0x17: /* Tick compare */
2755 gen_store_gpr(dc
, rd
, cpu_tick_cmpr
);
2757 case 0x18: /* System tick */
2761 r_tickptr
= tcg_temp_new_ptr();
2762 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2763 offsetof(CPUSPARCState
, stick
));
2764 gen_helper_tick_get_count(cpu_dst
, r_tickptr
);
2765 tcg_temp_free_ptr(r_tickptr
);
2766 gen_store_gpr(dc
, rd
, cpu_dst
);
2769 case 0x19: /* System tick compare */
2770 gen_store_gpr(dc
, rd
, cpu_stick_cmpr
);
2772 case 0x10: /* Performance Control */
2773 case 0x11: /* Performance Instrumentation Counter */
2774 case 0x12: /* Dispatch Control */
2775 case 0x14: /* Softint set, WO */
2776 case 0x15: /* Softint clear, WO */
2781 #if !defined(CONFIG_USER_ONLY)
2782 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
2783 #ifndef TARGET_SPARC64
2784 if (!supervisor(dc
)) {
2788 gen_helper_rdpsr(cpu_dst
, cpu_env
);
2790 CHECK_IU_FEATURE(dc
, HYPV
);
2791 if (!hypervisor(dc
))
2793 rs1
= GET_FIELD(insn
, 13, 17);
2796 // gen_op_rdhpstate();
2799 // gen_op_rdhtstate();
2802 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
2805 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
2808 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
2810 case 31: // hstick_cmpr
2811 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
2817 gen_store_gpr(dc
, rd
, cpu_dst
);
2819 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
2820 if (!supervisor(dc
)) {
2823 cpu_tmp0
= get_temp_tl(dc
);
2824 #ifdef TARGET_SPARC64
2825 rs1
= GET_FIELD(insn
, 13, 17);
2831 r_tsptr
= tcg_temp_new_ptr();
2832 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2833 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2834 offsetof(trap_state
, tpc
));
2835 tcg_temp_free_ptr(r_tsptr
);
2842 r_tsptr
= tcg_temp_new_ptr();
2843 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2844 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2845 offsetof(trap_state
, tnpc
));
2846 tcg_temp_free_ptr(r_tsptr
);
2853 r_tsptr
= tcg_temp_new_ptr();
2854 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2855 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2856 offsetof(trap_state
, tstate
));
2857 tcg_temp_free_ptr(r_tsptr
);
2862 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2864 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2865 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
2866 offsetof(trap_state
, tt
));
2867 tcg_temp_free_ptr(r_tsptr
);
2874 r_tickptr
= tcg_temp_new_ptr();
2875 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2876 offsetof(CPUSPARCState
, tick
));
2877 gen_helper_tick_get_count(cpu_tmp0
, r_tickptr
);
2878 tcg_temp_free_ptr(r_tickptr
);
2882 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
2885 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2886 offsetof(CPUSPARCState
, pstate
));
2889 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2890 offsetof(CPUSPARCState
, tl
));
2893 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2894 offsetof(CPUSPARCState
, psrpil
));
2897 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
2900 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2901 offsetof(CPUSPARCState
, cansave
));
2903 case 11: // canrestore
2904 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2905 offsetof(CPUSPARCState
, canrestore
));
2907 case 12: // cleanwin
2908 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2909 offsetof(CPUSPARCState
, cleanwin
));
2911 case 13: // otherwin
2912 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2913 offsetof(CPUSPARCState
, otherwin
));
2916 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2917 offsetof(CPUSPARCState
, wstate
));
2919 case 16: // UA2005 gl
2920 CHECK_IU_FEATURE(dc
, GL
);
2921 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2922 offsetof(CPUSPARCState
, gl
));
2924 case 26: // UA2005 strand status
2925 CHECK_IU_FEATURE(dc
, HYPV
);
2926 if (!hypervisor(dc
))
2928 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
2931 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
2938 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
2940 gen_store_gpr(dc
, rd
, cpu_tmp0
);
2942 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
2943 #ifdef TARGET_SPARC64
2945 gen_helper_flushw(cpu_env
);
2947 if (!supervisor(dc
))
2949 gen_store_gpr(dc
, rd
, cpu_tbr
);
2953 } else if (xop
== 0x34) { /* FPU Operations */
2954 if (gen_trap_ifnofpu(dc
)) {
2957 gen_op_clear_ieee_excp_and_FTT();
2958 rs1
= GET_FIELD(insn
, 13, 17);
2959 rs2
= GET_FIELD(insn
, 27, 31);
2960 xop
= GET_FIELD(insn
, 18, 26);
2963 case 0x1: /* fmovs */
2964 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
2965 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
2967 case 0x5: /* fnegs */
2968 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
2970 case 0x9: /* fabss */
2971 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
2973 case 0x29: /* fsqrts */
2974 CHECK_FPU_FEATURE(dc
, FSQRT
);
2975 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
2977 case 0x2a: /* fsqrtd */
2978 CHECK_FPU_FEATURE(dc
, FSQRT
);
2979 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
2981 case 0x2b: /* fsqrtq */
2982 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2983 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
2985 case 0x41: /* fadds */
2986 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
2988 case 0x42: /* faddd */
2989 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
2991 case 0x43: /* faddq */
2992 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2993 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
2995 case 0x45: /* fsubs */
2996 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
2998 case 0x46: /* fsubd */
2999 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3001 case 0x47: /* fsubq */
3002 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3003 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3005 case 0x49: /* fmuls */
3006 CHECK_FPU_FEATURE(dc
, FMUL
);
3007 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3009 case 0x4a: /* fmuld */
3010 CHECK_FPU_FEATURE(dc
, FMUL
);
3011 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3013 case 0x4b: /* fmulq */
3014 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3015 CHECK_FPU_FEATURE(dc
, FMUL
);
3016 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3018 case 0x4d: /* fdivs */
3019 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3021 case 0x4e: /* fdivd */
3022 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3024 case 0x4f: /* fdivq */
3025 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3026 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3028 case 0x69: /* fsmuld */
3029 CHECK_FPU_FEATURE(dc
, FSMULD
);
3030 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3032 case 0x6e: /* fdmulq */
3033 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3034 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3036 case 0xc4: /* fitos */
3037 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3039 case 0xc6: /* fdtos */
3040 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3042 case 0xc7: /* fqtos */
3043 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3044 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3046 case 0xc8: /* fitod */
3047 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3049 case 0xc9: /* fstod */
3050 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3052 case 0xcb: /* fqtod */
3053 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3054 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3056 case 0xcc: /* fitoq */
3057 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3058 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3060 case 0xcd: /* fstoq */
3061 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3062 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3064 case 0xce: /* fdtoq */
3065 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3066 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3068 case 0xd1: /* fstoi */
3069 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3071 case 0xd2: /* fdtoi */
3072 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3074 case 0xd3: /* fqtoi */
3075 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3076 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3078 #ifdef TARGET_SPARC64
3079 case 0x2: /* V9 fmovd */
3080 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3081 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3083 case 0x3: /* V9 fmovq */
3084 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3085 gen_move_Q(rd
, rs2
);
3087 case 0x6: /* V9 fnegd */
3088 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3090 case 0x7: /* V9 fnegq */
3091 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3092 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3094 case 0xa: /* V9 fabsd */
3095 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3097 case 0xb: /* V9 fabsq */
3098 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3099 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3101 case 0x81: /* V9 fstox */
3102 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3104 case 0x82: /* V9 fdtox */
3105 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3107 case 0x83: /* V9 fqtox */
3108 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3109 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3111 case 0x84: /* V9 fxtos */
3112 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3114 case 0x88: /* V9 fxtod */
3115 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3117 case 0x8c: /* V9 fxtoq */
3118 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3119 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3125 } else if (xop
== 0x35) { /* FPU Operations */
3126 #ifdef TARGET_SPARC64
3129 if (gen_trap_ifnofpu(dc
)) {
3132 gen_op_clear_ieee_excp_and_FTT();
3133 rs1
= GET_FIELD(insn
, 13, 17);
3134 rs2
= GET_FIELD(insn
, 27, 31);
3135 xop
= GET_FIELD(insn
, 18, 26);
3138 #ifdef TARGET_SPARC64
3142 cond = GET_FIELD_SP(insn, 10, 12); \
3143 cpu_src1 = get_src1(dc, insn); \
3144 gen_compare_reg(&cmp, cond, cpu_src1); \
3145 gen_fmov##sz(dc, &cmp, rd, rs2); \
3146 free_compare(&cmp); \
3149 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3152 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3155 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3156 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3163 #ifdef TARGET_SPARC64
3164 #define FMOVCC(fcc, sz) \
3167 cond = GET_FIELD_SP(insn, 14, 17); \
3168 gen_fcompare(&cmp, fcc, cond); \
3169 gen_fmov##sz(dc, &cmp, rd, rs2); \
3170 free_compare(&cmp); \
3173 case 0x001: /* V9 fmovscc %fcc0 */
3176 case 0x002: /* V9 fmovdcc %fcc0 */
3179 case 0x003: /* V9 fmovqcc %fcc0 */
3180 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3183 case 0x041: /* V9 fmovscc %fcc1 */
3186 case 0x042: /* V9 fmovdcc %fcc1 */
3189 case 0x043: /* V9 fmovqcc %fcc1 */
3190 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3193 case 0x081: /* V9 fmovscc %fcc2 */
3196 case 0x082: /* V9 fmovdcc %fcc2 */
3199 case 0x083: /* V9 fmovqcc %fcc2 */
3200 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3203 case 0x0c1: /* V9 fmovscc %fcc3 */
3206 case 0x0c2: /* V9 fmovdcc %fcc3 */
3209 case 0x0c3: /* V9 fmovqcc %fcc3 */
3210 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3214 #define FMOVCC(xcc, sz) \
3217 cond = GET_FIELD_SP(insn, 14, 17); \
3218 gen_compare(&cmp, xcc, cond, dc); \
3219 gen_fmov##sz(dc, &cmp, rd, rs2); \
3220 free_compare(&cmp); \
3223 case 0x101: /* V9 fmovscc %icc */
3226 case 0x102: /* V9 fmovdcc %icc */
3229 case 0x103: /* V9 fmovqcc %icc */
3230 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3233 case 0x181: /* V9 fmovscc %xcc */
3236 case 0x182: /* V9 fmovdcc %xcc */
3239 case 0x183: /* V9 fmovqcc %xcc */
3240 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3245 case 0x51: /* fcmps, V9 %fcc */
3246 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3247 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3248 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3250 case 0x52: /* fcmpd, V9 %fcc */
3251 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3252 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3253 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3255 case 0x53: /* fcmpq, V9 %fcc */
3256 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3257 gen_op_load_fpr_QT0(QFPREG(rs1
));
3258 gen_op_load_fpr_QT1(QFPREG(rs2
));
3259 gen_op_fcmpq(rd
& 3);
3261 case 0x55: /* fcmpes, V9 %fcc */
3262 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3263 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3264 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3266 case 0x56: /* fcmped, V9 %fcc */
3267 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3268 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3269 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3271 case 0x57: /* fcmpeq, V9 %fcc */
3272 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3273 gen_op_load_fpr_QT0(QFPREG(rs1
));
3274 gen_op_load_fpr_QT1(QFPREG(rs2
));
3275 gen_op_fcmpeq(rd
& 3);
3280 } else if (xop
== 0x2) {
3281 TCGv dst
= gen_dest_gpr(dc
, rd
);
3282 rs1
= GET_FIELD(insn
, 13, 17);
3284 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3285 if (IS_IMM
) { /* immediate */
3286 simm
= GET_FIELDs(insn
, 19, 31);
3287 tcg_gen_movi_tl(dst
, simm
);
3288 gen_store_gpr(dc
, rd
, dst
);
3289 } else { /* register */
3290 rs2
= GET_FIELD(insn
, 27, 31);
3292 tcg_gen_movi_tl(dst
, 0);
3293 gen_store_gpr(dc
, rd
, dst
);
3295 cpu_src2
= gen_load_gpr(dc
, rs2
);
3296 gen_store_gpr(dc
, rd
, cpu_src2
);
3300 cpu_src1
= get_src1(dc
, insn
);
3301 if (IS_IMM
) { /* immediate */
3302 simm
= GET_FIELDs(insn
, 19, 31);
3303 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
3304 gen_store_gpr(dc
, rd
, dst
);
3305 } else { /* register */
3306 rs2
= GET_FIELD(insn
, 27, 31);
3308 /* mov shortcut: or x, %g0, y -> mov x, y */
3309 gen_store_gpr(dc
, rd
, cpu_src1
);
3311 cpu_src2
= gen_load_gpr(dc
, rs2
);
3312 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
3313 gen_store_gpr(dc
, rd
, dst
);
3317 #ifdef TARGET_SPARC64
3318 } else if (xop
== 0x25) { /* sll, V9 sllx */
3319 cpu_src1
= get_src1(dc
, insn
);
3320 if (IS_IMM
) { /* immediate */
3321 simm
= GET_FIELDs(insn
, 20, 31);
3322 if (insn
& (1 << 12)) {
3323 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3325 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3327 } else { /* register */
3328 rs2
= GET_FIELD(insn
, 27, 31);
3329 cpu_src2
= gen_load_gpr(dc
, rs2
);
3330 cpu_tmp0
= get_temp_tl(dc
);
3331 if (insn
& (1 << 12)) {
3332 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3334 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3336 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3338 gen_store_gpr(dc
, rd
, cpu_dst
);
3339 } else if (xop
== 0x26) { /* srl, V9 srlx */
3340 cpu_src1
= get_src1(dc
, insn
);
3341 if (IS_IMM
) { /* immediate */
3342 simm
= GET_FIELDs(insn
, 20, 31);
3343 if (insn
& (1 << 12)) {
3344 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3346 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3347 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3349 } else { /* register */
3350 rs2
= GET_FIELD(insn
, 27, 31);
3351 cpu_src2
= gen_load_gpr(dc
, rs2
);
3352 cpu_tmp0
= get_temp_tl(dc
);
3353 if (insn
& (1 << 12)) {
3354 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3355 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3357 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3358 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3359 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3362 gen_store_gpr(dc
, rd
, cpu_dst
);
3363 } else if (xop
== 0x27) { /* sra, V9 srax */
3364 cpu_src1
= get_src1(dc
, insn
);
3365 if (IS_IMM
) { /* immediate */
3366 simm
= GET_FIELDs(insn
, 20, 31);
3367 if (insn
& (1 << 12)) {
3368 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3370 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3371 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3373 } else { /* register */
3374 rs2
= GET_FIELD(insn
, 27, 31);
3375 cpu_src2
= gen_load_gpr(dc
, rs2
);
3376 cpu_tmp0
= get_temp_tl(dc
);
3377 if (insn
& (1 << 12)) {
3378 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3379 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3381 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3382 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3383 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3386 gen_store_gpr(dc
, rd
, cpu_dst
);
3388 } else if (xop
< 0x36) {
3390 cpu_src1
= get_src1(dc
, insn
);
3391 cpu_src2
= get_src2(dc
, insn
);
3392 switch (xop
& ~0x10) {
3395 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3396 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3397 dc
->cc_op
= CC_OP_ADD
;
3399 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3403 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3405 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3406 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3407 dc
->cc_op
= CC_OP_LOGIC
;
3411 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3413 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3414 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3415 dc
->cc_op
= CC_OP_LOGIC
;
3419 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3421 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3422 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3423 dc
->cc_op
= CC_OP_LOGIC
;
3428 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3429 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3430 dc
->cc_op
= CC_OP_SUB
;
3432 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3435 case 0x5: /* andn */
3436 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3438 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3439 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3440 dc
->cc_op
= CC_OP_LOGIC
;
3444 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3446 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3447 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3448 dc
->cc_op
= CC_OP_LOGIC
;
3451 case 0x7: /* xorn */
3452 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3454 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3455 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3456 dc
->cc_op
= CC_OP_LOGIC
;
3459 case 0x8: /* addx, V9 addc */
3460 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3463 #ifdef TARGET_SPARC64
3464 case 0x9: /* V9 mulx */
3465 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3468 case 0xa: /* umul */
3469 CHECK_IU_FEATURE(dc
, MUL
);
3470 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3472 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3473 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3474 dc
->cc_op
= CC_OP_LOGIC
;
3477 case 0xb: /* smul */
3478 CHECK_IU_FEATURE(dc
, MUL
);
3479 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
3481 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3482 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3483 dc
->cc_op
= CC_OP_LOGIC
;
3486 case 0xc: /* subx, V9 subc */
3487 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3490 #ifdef TARGET_SPARC64
3491 case 0xd: /* V9 udivx */
3492 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3495 case 0xe: /* udiv */
3496 CHECK_IU_FEATURE(dc
, DIV
);
3498 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3500 dc
->cc_op
= CC_OP_DIV
;
3502 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
3506 case 0xf: /* sdiv */
3507 CHECK_IU_FEATURE(dc
, DIV
);
3509 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3511 dc
->cc_op
= CC_OP_DIV
;
3513 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
3520 gen_store_gpr(dc
, rd
, cpu_dst
);
3522 cpu_src1
= get_src1(dc
, insn
);
3523 cpu_src2
= get_src2(dc
, insn
);
3525 case 0x20: /* taddcc */
3526 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3527 gen_store_gpr(dc
, rd
, cpu_dst
);
3528 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
3529 dc
->cc_op
= CC_OP_TADD
;
3531 case 0x21: /* tsubcc */
3532 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3533 gen_store_gpr(dc
, rd
, cpu_dst
);
3534 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
3535 dc
->cc_op
= CC_OP_TSUB
;
3537 case 0x22: /* taddcctv */
3538 gen_helper_taddcctv(cpu_dst
, cpu_env
,
3539 cpu_src1
, cpu_src2
);
3540 gen_store_gpr(dc
, rd
, cpu_dst
);
3541 dc
->cc_op
= CC_OP_TADDTV
;
3543 case 0x23: /* tsubcctv */
3544 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
3545 cpu_src1
, cpu_src2
);
3546 gen_store_gpr(dc
, rd
, cpu_dst
);
3547 dc
->cc_op
= CC_OP_TSUBTV
;
3549 case 0x24: /* mulscc */
3551 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
3552 gen_store_gpr(dc
, rd
, cpu_dst
);
3553 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3554 dc
->cc_op
= CC_OP_ADD
;
3556 #ifndef TARGET_SPARC64
3557 case 0x25: /* sll */
3558 if (IS_IMM
) { /* immediate */
3559 simm
= GET_FIELDs(insn
, 20, 31);
3560 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3561 } else { /* register */
3562 cpu_tmp0
= get_temp_tl(dc
);
3563 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3564 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3566 gen_store_gpr(dc
, rd
, cpu_dst
);
3568 case 0x26: /* srl */
3569 if (IS_IMM
) { /* immediate */
3570 simm
= GET_FIELDs(insn
, 20, 31);
3571 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3572 } else { /* register */
3573 cpu_tmp0
= get_temp_tl(dc
);
3574 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3575 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3577 gen_store_gpr(dc
, rd
, cpu_dst
);
3579 case 0x27: /* sra */
3580 if (IS_IMM
) { /* immediate */
3581 simm
= GET_FIELDs(insn
, 20, 31);
3582 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3583 } else { /* register */
3584 cpu_tmp0
= get_temp_tl(dc
);
3585 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3586 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3588 gen_store_gpr(dc
, rd
, cpu_dst
);
3593 cpu_tmp0
= get_temp_tl(dc
);
3596 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3597 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
3599 #ifndef TARGET_SPARC64
3600 case 0x01 ... 0x0f: /* undefined in the
3604 case 0x10 ... 0x1f: /* implementation-dependent
3608 if ((rd
== 0x13) && (dc
->def
->features
&
3609 CPU_FEATURE_POWERDOWN
)) {
3610 /* LEON3 power-down */
3612 gen_helper_power_down(cpu_env
);
3616 case 0x2: /* V9 wrccr */
3617 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3618 gen_helper_wrccr(cpu_env
, cpu_tmp0
);
3619 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3620 dc
->cc_op
= CC_OP_FLAGS
;
3622 case 0x3: /* V9 wrasi */
3623 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3624 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
3625 tcg_gen_trunc_tl_i32(cpu_asi
, cpu_tmp0
);
3627 case 0x6: /* V9 wrfprs */
3628 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3629 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
3635 case 0xf: /* V9 sir, nop if user */
3636 #if !defined(CONFIG_USER_ONLY)
3637 if (supervisor(dc
)) {
3642 case 0x13: /* Graphics Status */
3643 if (gen_trap_ifnofpu(dc
)) {
3646 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
3648 case 0x14: /* Softint set */
3649 if (!supervisor(dc
))
3651 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3652 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
3654 case 0x15: /* Softint clear */
3655 if (!supervisor(dc
))
3657 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3658 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
3660 case 0x16: /* Softint write */
3661 if (!supervisor(dc
))
3663 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3664 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
3666 case 0x17: /* Tick compare */
3667 #if !defined(CONFIG_USER_ONLY)
3668 if (!supervisor(dc
))
3674 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
3676 r_tickptr
= tcg_temp_new_ptr();
3677 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3678 offsetof(CPUSPARCState
, tick
));
3679 gen_helper_tick_set_limit(r_tickptr
,
3681 tcg_temp_free_ptr(r_tickptr
);
3684 case 0x18: /* System tick */
3685 #if !defined(CONFIG_USER_ONLY)
3686 if (!supervisor(dc
))
3692 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
3694 r_tickptr
= tcg_temp_new_ptr();
3695 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3696 offsetof(CPUSPARCState
, stick
));
3697 gen_helper_tick_set_count(r_tickptr
,
3699 tcg_temp_free_ptr(r_tickptr
);
3702 case 0x19: /* System tick compare */
3703 #if !defined(CONFIG_USER_ONLY)
3704 if (!supervisor(dc
))
3710 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
3712 r_tickptr
= tcg_temp_new_ptr();
3713 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3714 offsetof(CPUSPARCState
, stick
));
3715 gen_helper_tick_set_limit(r_tickptr
,
3717 tcg_temp_free_ptr(r_tickptr
);
3721 case 0x10: /* Performance Control */
3722 case 0x11: /* Performance Instrumentation
3724 case 0x12: /* Dispatch Control */
3731 #if !defined(CONFIG_USER_ONLY)
3732 case 0x31: /* wrpsr, V9 saved, restored */
3734 if (!supervisor(dc
))
3736 #ifdef TARGET_SPARC64
3739 gen_helper_saved(cpu_env
);
3742 gen_helper_restored(cpu_env
);
3744 case 2: /* UA2005 allclean */
3745 case 3: /* UA2005 otherw */
3746 case 4: /* UA2005 normalw */
3747 case 5: /* UA2005 invalw */
3753 cpu_tmp0
= get_temp_tl(dc
);
3754 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3755 gen_helper_wrpsr(cpu_env
, cpu_tmp0
);
3756 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3757 dc
->cc_op
= CC_OP_FLAGS
;
3765 case 0x32: /* wrwim, V9 wrpr */
3767 if (!supervisor(dc
))
3769 cpu_tmp0
= get_temp_tl(dc
);
3770 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3771 #ifdef TARGET_SPARC64
3777 r_tsptr
= tcg_temp_new_ptr();
3778 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3779 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3780 offsetof(trap_state
, tpc
));
3781 tcg_temp_free_ptr(r_tsptr
);
3788 r_tsptr
= tcg_temp_new_ptr();
3789 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3790 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3791 offsetof(trap_state
, tnpc
));
3792 tcg_temp_free_ptr(r_tsptr
);
3799 r_tsptr
= tcg_temp_new_ptr();
3800 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3801 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3802 offsetof(trap_state
,
3804 tcg_temp_free_ptr(r_tsptr
);
3811 r_tsptr
= tcg_temp_new_ptr();
3812 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3813 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
3814 offsetof(trap_state
, tt
));
3815 tcg_temp_free_ptr(r_tsptr
);
3822 r_tickptr
= tcg_temp_new_ptr();
3823 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3824 offsetof(CPUSPARCState
, tick
));
3825 gen_helper_tick_set_count(r_tickptr
,
3827 tcg_temp_free_ptr(r_tickptr
);
3831 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
3835 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
3836 dc
->npc
= DYNAMIC_PC
;
3840 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3841 offsetof(CPUSPARCState
, tl
));
3842 dc
->npc
= DYNAMIC_PC
;
3845 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
3848 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
3851 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3852 offsetof(CPUSPARCState
,
3855 case 11: // canrestore
3856 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3857 offsetof(CPUSPARCState
,
3860 case 12: // cleanwin
3861 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3862 offsetof(CPUSPARCState
,
3865 case 13: // otherwin
3866 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3867 offsetof(CPUSPARCState
,
3871 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3872 offsetof(CPUSPARCState
,
3875 case 16: // UA2005 gl
3876 CHECK_IU_FEATURE(dc
, GL
);
3877 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3878 offsetof(CPUSPARCState
, gl
));
3880 case 26: // UA2005 strand status
3881 CHECK_IU_FEATURE(dc
, HYPV
);
3882 if (!hypervisor(dc
))
3884 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
3890 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
3891 if (dc
->def
->nwindows
!= 32) {
3892 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
3893 (1 << dc
->def
->nwindows
) - 1);
3898 case 0x33: /* wrtbr, UA2005 wrhpr */
3900 #ifndef TARGET_SPARC64
3901 if (!supervisor(dc
))
3903 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
3905 CHECK_IU_FEATURE(dc
, HYPV
);
3906 if (!hypervisor(dc
))
3908 cpu_tmp0
= get_temp_tl(dc
);
3909 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3912 // XXX gen_op_wrhpstate();
3919 // XXX gen_op_wrhtstate();
3922 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
3925 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
3927 case 31: // hstick_cmpr
3931 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
3932 r_tickptr
= tcg_temp_new_ptr();
3933 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3934 offsetof(CPUSPARCState
, hstick
));
3935 gen_helper_tick_set_limit(r_tickptr
,
3937 tcg_temp_free_ptr(r_tickptr
);
3940 case 6: // hver readonly
3948 #ifdef TARGET_SPARC64
3949 case 0x2c: /* V9 movcc */
3951 int cc
= GET_FIELD_SP(insn
, 11, 12);
3952 int cond
= GET_FIELD_SP(insn
, 14, 17);
3956 if (insn
& (1 << 18)) {
3958 gen_compare(&cmp
, 0, cond
, dc
);
3959 } else if (cc
== 2) {
3960 gen_compare(&cmp
, 1, cond
, dc
);
3965 gen_fcompare(&cmp
, cc
, cond
);
3968 /* The get_src2 above loaded the normal 13-bit
3969 immediate field, not the 11-bit field we have
3970 in movcc. But it did handle the reg case. */
3972 simm
= GET_FIELD_SPs(insn
, 0, 10);
3973 tcg_gen_movi_tl(cpu_src2
, simm
);
3976 dst
= gen_load_gpr(dc
, rd
);
3977 tcg_gen_movcond_tl(cmp
.cond
, dst
,
3981 gen_store_gpr(dc
, rd
, dst
);
3984 case 0x2d: /* V9 sdivx */
3985 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3986 gen_store_gpr(dc
, rd
, cpu_dst
);
3988 case 0x2e: /* V9 popc */
3989 gen_helper_popc(cpu_dst
, cpu_src2
);
3990 gen_store_gpr(dc
, rd
, cpu_dst
);
3992 case 0x2f: /* V9 movr */
3994 int cond
= GET_FIELD_SP(insn
, 10, 12);
3998 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4000 /* The get_src2 above loaded the normal 13-bit
4001 immediate field, not the 10-bit field we have
4002 in movr. But it did handle the reg case. */
4004 simm
= GET_FIELD_SPs(insn
, 0, 9);
4005 tcg_gen_movi_tl(cpu_src2
, simm
);
4008 dst
= gen_load_gpr(dc
, rd
);
4009 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4013 gen_store_gpr(dc
, rd
, dst
);
4021 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4022 #ifdef TARGET_SPARC64
4023 int opf
= GET_FIELD_SP(insn
, 5, 13);
4024 rs1
= GET_FIELD(insn
, 13, 17);
4025 rs2
= GET_FIELD(insn
, 27, 31);
4026 if (gen_trap_ifnofpu(dc
)) {
4031 case 0x000: /* VIS I edge8cc */
4032 CHECK_FPU_FEATURE(dc
, VIS1
);
4033 cpu_src1
= gen_load_gpr(dc
, rs1
);
4034 cpu_src2
= gen_load_gpr(dc
, rs2
);
4035 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4036 gen_store_gpr(dc
, rd
, cpu_dst
);
4038 case 0x001: /* VIS II edge8n */
4039 CHECK_FPU_FEATURE(dc
, VIS2
);
4040 cpu_src1
= gen_load_gpr(dc
, rs1
);
4041 cpu_src2
= gen_load_gpr(dc
, rs2
);
4042 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4043 gen_store_gpr(dc
, rd
, cpu_dst
);
4045 case 0x002: /* VIS I edge8lcc */
4046 CHECK_FPU_FEATURE(dc
, VIS1
);
4047 cpu_src1
= gen_load_gpr(dc
, rs1
);
4048 cpu_src2
= gen_load_gpr(dc
, rs2
);
4049 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4050 gen_store_gpr(dc
, rd
, cpu_dst
);
4052 case 0x003: /* VIS II edge8ln */
4053 CHECK_FPU_FEATURE(dc
, VIS2
);
4054 cpu_src1
= gen_load_gpr(dc
, rs1
);
4055 cpu_src2
= gen_load_gpr(dc
, rs2
);
4056 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4057 gen_store_gpr(dc
, rd
, cpu_dst
);
4059 case 0x004: /* VIS I edge16cc */
4060 CHECK_FPU_FEATURE(dc
, VIS1
);
4061 cpu_src1
= gen_load_gpr(dc
, rs1
);
4062 cpu_src2
= gen_load_gpr(dc
, rs2
);
4063 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4064 gen_store_gpr(dc
, rd
, cpu_dst
);
4066 case 0x005: /* VIS II edge16n */
4067 CHECK_FPU_FEATURE(dc
, VIS2
);
4068 cpu_src1
= gen_load_gpr(dc
, rs1
);
4069 cpu_src2
= gen_load_gpr(dc
, rs2
);
4070 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4071 gen_store_gpr(dc
, rd
, cpu_dst
);
4073 case 0x006: /* VIS I edge16lcc */
4074 CHECK_FPU_FEATURE(dc
, VIS1
);
4075 cpu_src1
= gen_load_gpr(dc
, rs1
);
4076 cpu_src2
= gen_load_gpr(dc
, rs2
);
4077 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4078 gen_store_gpr(dc
, rd
, cpu_dst
);
4080 case 0x007: /* VIS II edge16ln */
4081 CHECK_FPU_FEATURE(dc
, VIS2
);
4082 cpu_src1
= gen_load_gpr(dc
, rs1
);
4083 cpu_src2
= gen_load_gpr(dc
, rs2
);
4084 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4085 gen_store_gpr(dc
, rd
, cpu_dst
);
4087 case 0x008: /* VIS I edge32cc */
4088 CHECK_FPU_FEATURE(dc
, VIS1
);
4089 cpu_src1
= gen_load_gpr(dc
, rs1
);
4090 cpu_src2
= gen_load_gpr(dc
, rs2
);
4091 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4092 gen_store_gpr(dc
, rd
, cpu_dst
);
4094 case 0x009: /* VIS II edge32n */
4095 CHECK_FPU_FEATURE(dc
, VIS2
);
4096 cpu_src1
= gen_load_gpr(dc
, rs1
);
4097 cpu_src2
= gen_load_gpr(dc
, rs2
);
4098 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4099 gen_store_gpr(dc
, rd
, cpu_dst
);
4101 case 0x00a: /* VIS I edge32lcc */
4102 CHECK_FPU_FEATURE(dc
, VIS1
);
4103 cpu_src1
= gen_load_gpr(dc
, rs1
);
4104 cpu_src2
= gen_load_gpr(dc
, rs2
);
4105 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4106 gen_store_gpr(dc
, rd
, cpu_dst
);
4108 case 0x00b: /* VIS II edge32ln */
4109 CHECK_FPU_FEATURE(dc
, VIS2
);
4110 cpu_src1
= gen_load_gpr(dc
, rs1
);
4111 cpu_src2
= gen_load_gpr(dc
, rs2
);
4112 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4113 gen_store_gpr(dc
, rd
, cpu_dst
);
4115 case 0x010: /* VIS I array8 */
4116 CHECK_FPU_FEATURE(dc
, VIS1
);
4117 cpu_src1
= gen_load_gpr(dc
, rs1
);
4118 cpu_src2
= gen_load_gpr(dc
, rs2
);
4119 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4120 gen_store_gpr(dc
, rd
, cpu_dst
);
4122 case 0x012: /* VIS I array16 */
4123 CHECK_FPU_FEATURE(dc
, VIS1
);
4124 cpu_src1
= gen_load_gpr(dc
, rs1
);
4125 cpu_src2
= gen_load_gpr(dc
, rs2
);
4126 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4127 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4128 gen_store_gpr(dc
, rd
, cpu_dst
);
4130 case 0x014: /* VIS I array32 */
4131 CHECK_FPU_FEATURE(dc
, VIS1
);
4132 cpu_src1
= gen_load_gpr(dc
, rs1
);
4133 cpu_src2
= gen_load_gpr(dc
, rs2
);
4134 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4135 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4136 gen_store_gpr(dc
, rd
, cpu_dst
);
4138 case 0x018: /* VIS I alignaddr */
4139 CHECK_FPU_FEATURE(dc
, VIS1
);
4140 cpu_src1
= gen_load_gpr(dc
, rs1
);
4141 cpu_src2
= gen_load_gpr(dc
, rs2
);
4142 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4143 gen_store_gpr(dc
, rd
, cpu_dst
);
4145 case 0x01a: /* VIS I alignaddrl */
4146 CHECK_FPU_FEATURE(dc
, VIS1
);
4147 cpu_src1
= gen_load_gpr(dc
, rs1
);
4148 cpu_src2
= gen_load_gpr(dc
, rs2
);
4149 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4150 gen_store_gpr(dc
, rd
, cpu_dst
);
4152 case 0x019: /* VIS II bmask */
4153 CHECK_FPU_FEATURE(dc
, VIS2
);
4154 cpu_src1
= gen_load_gpr(dc
, rs1
);
4155 cpu_src2
= gen_load_gpr(dc
, rs2
);
4156 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4157 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4158 gen_store_gpr(dc
, rd
, cpu_dst
);
4160 case 0x020: /* VIS I fcmple16 */
4161 CHECK_FPU_FEATURE(dc
, VIS1
);
4162 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4163 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4164 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4165 gen_store_gpr(dc
, rd
, cpu_dst
);
4167 case 0x022: /* VIS I fcmpne16 */
4168 CHECK_FPU_FEATURE(dc
, VIS1
);
4169 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4170 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4171 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4172 gen_store_gpr(dc
, rd
, cpu_dst
);
4174 case 0x024: /* VIS I fcmple32 */
4175 CHECK_FPU_FEATURE(dc
, VIS1
);
4176 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4177 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4178 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4179 gen_store_gpr(dc
, rd
, cpu_dst
);
4181 case 0x026: /* VIS I fcmpne32 */
4182 CHECK_FPU_FEATURE(dc
, VIS1
);
4183 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4184 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4185 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4186 gen_store_gpr(dc
, rd
, cpu_dst
);
4188 case 0x028: /* VIS I fcmpgt16 */
4189 CHECK_FPU_FEATURE(dc
, VIS1
);
4190 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4191 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4192 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4193 gen_store_gpr(dc
, rd
, cpu_dst
);
4195 case 0x02a: /* VIS I fcmpeq16 */
4196 CHECK_FPU_FEATURE(dc
, VIS1
);
4197 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4198 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4199 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4200 gen_store_gpr(dc
, rd
, cpu_dst
);
4202 case 0x02c: /* VIS I fcmpgt32 */
4203 CHECK_FPU_FEATURE(dc
, VIS1
);
4204 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4205 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4206 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4207 gen_store_gpr(dc
, rd
, cpu_dst
);
4209 case 0x02e: /* VIS I fcmpeq32 */
4210 CHECK_FPU_FEATURE(dc
, VIS1
);
4211 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4212 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4213 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4214 gen_store_gpr(dc
, rd
, cpu_dst
);
4216 case 0x031: /* VIS I fmul8x16 */
4217 CHECK_FPU_FEATURE(dc
, VIS1
);
4218 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4220 case 0x033: /* VIS I fmul8x16au */
4221 CHECK_FPU_FEATURE(dc
, VIS1
);
4222 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4224 case 0x035: /* VIS I fmul8x16al */
4225 CHECK_FPU_FEATURE(dc
, VIS1
);
4226 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4228 case 0x036: /* VIS I fmul8sux16 */
4229 CHECK_FPU_FEATURE(dc
, VIS1
);
4230 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4232 case 0x037: /* VIS I fmul8ulx16 */
4233 CHECK_FPU_FEATURE(dc
, VIS1
);
4234 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4236 case 0x038: /* VIS I fmuld8sux16 */
4237 CHECK_FPU_FEATURE(dc
, VIS1
);
4238 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4240 case 0x039: /* VIS I fmuld8ulx16 */
4241 CHECK_FPU_FEATURE(dc
, VIS1
);
4242 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4244 case 0x03a: /* VIS I fpack32 */
4245 CHECK_FPU_FEATURE(dc
, VIS1
);
4246 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4248 case 0x03b: /* VIS I fpack16 */
4249 CHECK_FPU_FEATURE(dc
, VIS1
);
4250 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4251 cpu_dst_32
= gen_dest_fpr_F(dc
);
4252 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4253 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4255 case 0x03d: /* VIS I fpackfix */
4256 CHECK_FPU_FEATURE(dc
, VIS1
);
4257 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4258 cpu_dst_32
= gen_dest_fpr_F(dc
);
4259 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4260 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4262 case 0x03e: /* VIS I pdist */
4263 CHECK_FPU_FEATURE(dc
, VIS1
);
4264 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4266 case 0x048: /* VIS I faligndata */
4267 CHECK_FPU_FEATURE(dc
, VIS1
);
4268 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4270 case 0x04b: /* VIS I fpmerge */
4271 CHECK_FPU_FEATURE(dc
, VIS1
);
4272 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4274 case 0x04c: /* VIS II bshuffle */
4275 CHECK_FPU_FEATURE(dc
, VIS2
);
4276 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4278 case 0x04d: /* VIS I fexpand */
4279 CHECK_FPU_FEATURE(dc
, VIS1
);
4280 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4282 case 0x050: /* VIS I fpadd16 */
4283 CHECK_FPU_FEATURE(dc
, VIS1
);
4284 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4286 case 0x051: /* VIS I fpadd16s */
4287 CHECK_FPU_FEATURE(dc
, VIS1
);
4288 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4290 case 0x052: /* VIS I fpadd32 */
4291 CHECK_FPU_FEATURE(dc
, VIS1
);
4292 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4294 case 0x053: /* VIS I fpadd32s */
4295 CHECK_FPU_FEATURE(dc
, VIS1
);
4296 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4298 case 0x054: /* VIS I fpsub16 */
4299 CHECK_FPU_FEATURE(dc
, VIS1
);
4300 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4302 case 0x055: /* VIS I fpsub16s */
4303 CHECK_FPU_FEATURE(dc
, VIS1
);
4304 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4306 case 0x056: /* VIS I fpsub32 */
4307 CHECK_FPU_FEATURE(dc
, VIS1
);
4308 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4310 case 0x057: /* VIS I fpsub32s */
4311 CHECK_FPU_FEATURE(dc
, VIS1
);
4312 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4314 case 0x060: /* VIS I fzero */
4315 CHECK_FPU_FEATURE(dc
, VIS1
);
4316 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4317 tcg_gen_movi_i64(cpu_dst_64
, 0);
4318 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4320 case 0x061: /* VIS I fzeros */
4321 CHECK_FPU_FEATURE(dc
, VIS1
);
4322 cpu_dst_32
= gen_dest_fpr_F(dc
);
4323 tcg_gen_movi_i32(cpu_dst_32
, 0);
4324 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4326 case 0x062: /* VIS I fnor */
4327 CHECK_FPU_FEATURE(dc
, VIS1
);
4328 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4330 case 0x063: /* VIS I fnors */
4331 CHECK_FPU_FEATURE(dc
, VIS1
);
4332 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4334 case 0x064: /* VIS I fandnot2 */
4335 CHECK_FPU_FEATURE(dc
, VIS1
);
4336 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4338 case 0x065: /* VIS I fandnot2s */
4339 CHECK_FPU_FEATURE(dc
, VIS1
);
4340 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4342 case 0x066: /* VIS I fnot2 */
4343 CHECK_FPU_FEATURE(dc
, VIS1
);
4344 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4346 case 0x067: /* VIS I fnot2s */
4347 CHECK_FPU_FEATURE(dc
, VIS1
);
4348 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4350 case 0x068: /* VIS I fandnot1 */
4351 CHECK_FPU_FEATURE(dc
, VIS1
);
4352 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4354 case 0x069: /* VIS I fandnot1s */
4355 CHECK_FPU_FEATURE(dc
, VIS1
);
4356 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4358 case 0x06a: /* VIS I fnot1 */
4359 CHECK_FPU_FEATURE(dc
, VIS1
);
4360 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4362 case 0x06b: /* VIS I fnot1s */
4363 CHECK_FPU_FEATURE(dc
, VIS1
);
4364 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4366 case 0x06c: /* VIS I fxor */
4367 CHECK_FPU_FEATURE(dc
, VIS1
);
4368 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4370 case 0x06d: /* VIS I fxors */
4371 CHECK_FPU_FEATURE(dc
, VIS1
);
4372 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4374 case 0x06e: /* VIS I fnand */
4375 CHECK_FPU_FEATURE(dc
, VIS1
);
4376 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4378 case 0x06f: /* VIS I fnands */
4379 CHECK_FPU_FEATURE(dc
, VIS1
);
4380 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4382 case 0x070: /* VIS I fand */
4383 CHECK_FPU_FEATURE(dc
, VIS1
);
4384 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4386 case 0x071: /* VIS I fands */
4387 CHECK_FPU_FEATURE(dc
, VIS1
);
4388 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4390 case 0x072: /* VIS I fxnor */
4391 CHECK_FPU_FEATURE(dc
, VIS1
);
4392 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4394 case 0x073: /* VIS I fxnors */
4395 CHECK_FPU_FEATURE(dc
, VIS1
);
4396 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4398 case 0x074: /* VIS I fsrc1 */
4399 CHECK_FPU_FEATURE(dc
, VIS1
);
4400 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4401 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4403 case 0x075: /* VIS I fsrc1s */
4404 CHECK_FPU_FEATURE(dc
, VIS1
);
4405 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4406 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4408 case 0x076: /* VIS I fornot2 */
4409 CHECK_FPU_FEATURE(dc
, VIS1
);
4410 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4412 case 0x077: /* VIS I fornot2s */
4413 CHECK_FPU_FEATURE(dc
, VIS1
);
4414 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4416 case 0x078: /* VIS I fsrc2 */
4417 CHECK_FPU_FEATURE(dc
, VIS1
);
4418 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4419 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4421 case 0x079: /* VIS I fsrc2s */
4422 CHECK_FPU_FEATURE(dc
, VIS1
);
4423 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4424 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4426 case 0x07a: /* VIS I fornot1 */
4427 CHECK_FPU_FEATURE(dc
, VIS1
);
4428 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4430 case 0x07b: /* VIS I fornot1s */
4431 CHECK_FPU_FEATURE(dc
, VIS1
);
4432 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4434 case 0x07c: /* VIS I for */
4435 CHECK_FPU_FEATURE(dc
, VIS1
);
4436 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4438 case 0x07d: /* VIS I fors */
4439 CHECK_FPU_FEATURE(dc
, VIS1
);
4440 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4442 case 0x07e: /* VIS I fone */
4443 CHECK_FPU_FEATURE(dc
, VIS1
);
4444 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4445 tcg_gen_movi_i64(cpu_dst_64
, -1);
4446 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4448 case 0x07f: /* VIS I fones */
4449 CHECK_FPU_FEATURE(dc
, VIS1
);
4450 cpu_dst_32
= gen_dest_fpr_F(dc
);
4451 tcg_gen_movi_i32(cpu_dst_32
, -1);
4452 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4454 case 0x080: /* VIS I shutdown */
4455 case 0x081: /* VIS II siam */
4464 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
4465 #ifdef TARGET_SPARC64
4470 #ifdef TARGET_SPARC64
4471 } else if (xop
== 0x39) { /* V9 return */
4475 cpu_src1
= get_src1(dc
, insn
);
4476 cpu_tmp0
= get_temp_tl(dc
);
4477 if (IS_IMM
) { /* immediate */
4478 simm
= GET_FIELDs(insn
, 19, 31);
4479 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
4480 } else { /* register */
4481 rs2
= GET_FIELD(insn
, 27, 31);
4483 cpu_src2
= gen_load_gpr(dc
, rs2
);
4484 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4486 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
4489 gen_helper_restore(cpu_env
);
4491 r_const
= tcg_const_i32(3);
4492 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4493 tcg_temp_free_i32(r_const
);
4494 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4495 dc
->npc
= DYNAMIC_PC
;
4499 cpu_src1
= get_src1(dc
, insn
);
4500 cpu_tmp0
= get_temp_tl(dc
);
4501 if (IS_IMM
) { /* immediate */
4502 simm
= GET_FIELDs(insn
, 19, 31);
4503 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
4504 } else { /* register */
4505 rs2
= GET_FIELD(insn
, 27, 31);
4507 cpu_src2
= gen_load_gpr(dc
, rs2
);
4508 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4510 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
4514 case 0x38: /* jmpl */
4519 t
= gen_dest_gpr(dc
, rd
);
4520 tcg_gen_movi_tl(t
, dc
->pc
);
4521 gen_store_gpr(dc
, rd
, t
);
4523 r_const
= tcg_const_i32(3);
4524 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4525 tcg_temp_free_i32(r_const
);
4526 gen_address_mask(dc
, cpu_tmp0
);
4527 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4528 dc
->npc
= DYNAMIC_PC
;
4531 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4532 case 0x39: /* rett, V9 return */
4536 if (!supervisor(dc
))
4539 r_const
= tcg_const_i32(3);
4540 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4541 tcg_temp_free_i32(r_const
);
4542 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4543 dc
->npc
= DYNAMIC_PC
;
4544 gen_helper_rett(cpu_env
);
4548 case 0x3b: /* flush */
4549 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
4553 case 0x3c: /* save */
4555 gen_helper_save(cpu_env
);
4556 gen_store_gpr(dc
, rd
, cpu_tmp0
);
4558 case 0x3d: /* restore */
4560 gen_helper_restore(cpu_env
);
4561 gen_store_gpr(dc
, rd
, cpu_tmp0
);
4563 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4564 case 0x3e: /* V9 done/retry */
4568 if (!supervisor(dc
))
4570 dc
->npc
= DYNAMIC_PC
;
4571 dc
->pc
= DYNAMIC_PC
;
4572 gen_helper_done(cpu_env
);
4575 if (!supervisor(dc
))
4577 dc
->npc
= DYNAMIC_PC
;
4578 dc
->pc
= DYNAMIC_PC
;
4579 gen_helper_retry(cpu_env
);
4594 case 3: /* load/store instructions */
4596 unsigned int xop
= GET_FIELD(insn
, 7, 12);
4597 /* ??? gen_address_mask prevents us from using a source
4598 register directly. Always generate a temporary. */
4599 TCGv cpu_addr
= get_temp_tl(dc
);
4601 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
4602 if (xop
== 0x3c || xop
== 0x3e) {
4603 /* V9 casa/casxa : no offset */
4604 } else if (IS_IMM
) { /* immediate */
4605 simm
= GET_FIELDs(insn
, 19, 31);
4607 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
4609 } else { /* register */
4610 rs2
= GET_FIELD(insn
, 27, 31);
4612 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
4615 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
4616 (xop
> 0x17 && xop
<= 0x1d ) ||
4617 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
4618 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
4621 case 0x0: /* ld, V9 lduw, load unsigned word */
4622 gen_address_mask(dc
, cpu_addr
);
4623 tcg_gen_qemu_ld32u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4625 case 0x1: /* ldub, load unsigned byte */
4626 gen_address_mask(dc
, cpu_addr
);
4627 tcg_gen_qemu_ld8u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4629 case 0x2: /* lduh, load unsigned halfword */
4630 gen_address_mask(dc
, cpu_addr
);
4631 tcg_gen_qemu_ld16u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4633 case 0x3: /* ldd, load double word */
4641 r_const
= tcg_const_i32(7);
4642 /* XXX remove alignment check */
4643 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4644 tcg_temp_free_i32(r_const
);
4645 gen_address_mask(dc
, cpu_addr
);
4646 t64
= tcg_temp_new_i64();
4647 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4648 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4649 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
4650 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
4651 tcg_gen_shri_i64(t64
, t64
, 32);
4652 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4653 tcg_temp_free_i64(t64
);
4654 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
4657 case 0x9: /* ldsb, load signed byte */
4658 gen_address_mask(dc
, cpu_addr
);
4659 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4661 case 0xa: /* ldsh, load signed halfword */
4662 gen_address_mask(dc
, cpu_addr
);
4663 tcg_gen_qemu_ld16s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4665 case 0xd: /* ldstub -- XXX: should be atomically */
4669 gen_address_mask(dc
, cpu_addr
);
4670 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4671 r_const
= tcg_const_tl(0xff);
4672 tcg_gen_qemu_st8(r_const
, cpu_addr
, dc
->mem_idx
);
4673 tcg_temp_free(r_const
);
4677 /* swap, swap register with memory. Also atomically */
4679 TCGv t0
= get_temp_tl(dc
);
4680 CHECK_IU_FEATURE(dc
, SWAP
);
4681 cpu_src1
= gen_load_gpr(dc
, rd
);
4682 gen_address_mask(dc
, cpu_addr
);
4683 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4684 tcg_gen_qemu_st32(cpu_src1
, cpu_addr
, dc
->mem_idx
);
4685 tcg_gen_mov_tl(cpu_val
, t0
);
4688 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4689 case 0x10: /* lda, V9 lduwa, load word alternate */
4690 #ifndef TARGET_SPARC64
4693 if (!supervisor(dc
))
4697 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 0);
4699 case 0x11: /* lduba, load unsigned byte alternate */
4700 #ifndef TARGET_SPARC64
4703 if (!supervisor(dc
))
4707 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 0);
4709 case 0x12: /* lduha, load unsigned halfword alternate */
4710 #ifndef TARGET_SPARC64
4713 if (!supervisor(dc
))
4717 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 0);
4719 case 0x13: /* ldda, load double word alternate */
4720 #ifndef TARGET_SPARC64
4723 if (!supervisor(dc
))
4729 gen_ldda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4731 case 0x19: /* ldsba, load signed byte alternate */
4732 #ifndef TARGET_SPARC64
4735 if (!supervisor(dc
))
4739 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 1);
4741 case 0x1a: /* ldsha, load signed halfword alternate */
4742 #ifndef TARGET_SPARC64
4745 if (!supervisor(dc
))
4749 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 1);
4751 case 0x1d: /* ldstuba -- XXX: should be atomically */
4752 #ifndef TARGET_SPARC64
4755 if (!supervisor(dc
))
4759 gen_ldstub_asi(cpu_val
, cpu_addr
, insn
);
4761 case 0x1f: /* swapa, swap reg with alt. memory. Also
4763 CHECK_IU_FEATURE(dc
, SWAP
);
4764 #ifndef TARGET_SPARC64
4767 if (!supervisor(dc
))
4771 cpu_src1
= gen_load_gpr(dc
, rd
);
4772 gen_swap_asi(cpu_val
, cpu_src1
, cpu_addr
, insn
);
4775 #ifndef TARGET_SPARC64
4776 case 0x30: /* ldc */
4777 case 0x31: /* ldcsr */
4778 case 0x33: /* lddc */
4782 #ifdef TARGET_SPARC64
4783 case 0x08: /* V9 ldsw */
4784 gen_address_mask(dc
, cpu_addr
);
4785 tcg_gen_qemu_ld32s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4787 case 0x0b: /* V9 ldx */
4788 gen_address_mask(dc
, cpu_addr
);
4789 tcg_gen_qemu_ld64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4791 case 0x18: /* V9 ldswa */
4793 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 1);
4795 case 0x1b: /* V9 ldxa */
4797 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 8, 0);
4799 case 0x2d: /* V9 prefetch, no effect */
4801 case 0x30: /* V9 ldfa */
4802 if (gen_trap_ifnofpu(dc
)) {
4806 gen_ldf_asi(cpu_addr
, insn
, 4, rd
);
4807 gen_update_fprs_dirty(rd
);
4809 case 0x33: /* V9 lddfa */
4810 if (gen_trap_ifnofpu(dc
)) {
4814 gen_ldf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
4815 gen_update_fprs_dirty(DFPREG(rd
));
4817 case 0x3d: /* V9 prefetcha, no effect */
4819 case 0x32: /* V9 ldqfa */
4820 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4821 if (gen_trap_ifnofpu(dc
)) {
4825 gen_ldf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
4826 gen_update_fprs_dirty(QFPREG(rd
));
4832 gen_store_gpr(dc
, rd
, cpu_val
);
4833 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4836 } else if (xop
>= 0x20 && xop
< 0x24) {
4839 if (gen_trap_ifnofpu(dc
)) {
4844 case 0x20: /* ldf, load fpreg */
4845 gen_address_mask(dc
, cpu_addr
);
4846 t0
= get_temp_tl(dc
);
4847 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4848 cpu_dst_32
= gen_dest_fpr_F(dc
);
4849 tcg_gen_trunc_tl_i32(cpu_dst_32
, t0
);
4850 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4852 case 0x21: /* ldfsr, V9 ldxfsr */
4853 #ifdef TARGET_SPARC64
4854 gen_address_mask(dc
, cpu_addr
);
4856 TCGv_i64 t64
= tcg_temp_new_i64();
4857 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4858 gen_helper_ldxfsr(cpu_env
, t64
);
4859 tcg_temp_free_i64(t64
);
4863 cpu_dst_32
= get_temp_i32(dc
);
4864 t0
= get_temp_tl(dc
);
4865 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4866 tcg_gen_trunc_tl_i32(cpu_dst_32
, t0
);
4867 gen_helper_ldfsr(cpu_env
, cpu_dst_32
);
4869 case 0x22: /* ldqf, load quad fpreg */
4873 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4874 r_const
= tcg_const_i32(dc
->mem_idx
);
4875 gen_address_mask(dc
, cpu_addr
);
4876 gen_helper_ldqf(cpu_env
, cpu_addr
, r_const
);
4877 tcg_temp_free_i32(r_const
);
4878 gen_op_store_QT0_fpr(QFPREG(rd
));
4879 gen_update_fprs_dirty(QFPREG(rd
));
4882 case 0x23: /* lddf, load double fpreg */
4883 gen_address_mask(dc
, cpu_addr
);
4884 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4885 tcg_gen_qemu_ld64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
);
4886 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4891 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4892 xop
== 0xe || xop
== 0x1e) {
4893 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
4896 case 0x4: /* st, store word */
4897 gen_address_mask(dc
, cpu_addr
);
4898 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4900 case 0x5: /* stb, store byte */
4901 gen_address_mask(dc
, cpu_addr
);
4902 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4904 case 0x6: /* sth, store halfword */
4905 gen_address_mask(dc
, cpu_addr
);
4906 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4908 case 0x7: /* std, store double word */
4917 gen_address_mask(dc
, cpu_addr
);
4918 r_const
= tcg_const_i32(7);
4919 /* XXX remove alignment check */
4920 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4921 tcg_temp_free_i32(r_const
);
4922 lo
= gen_load_gpr(dc
, rd
+ 1);
4924 t64
= tcg_temp_new_i64();
4925 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
4926 tcg_gen_qemu_st64(t64
, cpu_addr
, dc
->mem_idx
);
4927 tcg_temp_free_i64(t64
);
4930 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4931 case 0x14: /* sta, V9 stwa, store word alternate */
4932 #ifndef TARGET_SPARC64
4935 if (!supervisor(dc
))
4939 gen_st_asi(cpu_val
, cpu_addr
, insn
, 4);
4940 dc
->npc
= DYNAMIC_PC
;
4942 case 0x15: /* stba, store byte alternate */
4943 #ifndef TARGET_SPARC64
4946 if (!supervisor(dc
))
4950 gen_st_asi(cpu_val
, cpu_addr
, insn
, 1);
4951 dc
->npc
= DYNAMIC_PC
;
4953 case 0x16: /* stha, store halfword alternate */
4954 #ifndef TARGET_SPARC64
4957 if (!supervisor(dc
))
4961 gen_st_asi(cpu_val
, cpu_addr
, insn
, 2);
4962 dc
->npc
= DYNAMIC_PC
;
4964 case 0x17: /* stda, store double word alternate */
4965 #ifndef TARGET_SPARC64
4968 if (!supervisor(dc
))
4975 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4979 #ifdef TARGET_SPARC64
4980 case 0x0e: /* V9 stx */
4981 gen_address_mask(dc
, cpu_addr
);
4982 tcg_gen_qemu_st64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4984 case 0x1e: /* V9 stxa */
4986 gen_st_asi(cpu_val
, cpu_addr
, insn
, 8);
4987 dc
->npc
= DYNAMIC_PC
;
4993 } else if (xop
> 0x23 && xop
< 0x28) {
4994 if (gen_trap_ifnofpu(dc
)) {
4999 case 0x24: /* stf, store fpreg */
5001 TCGv t
= get_temp_tl(dc
);
5002 gen_address_mask(dc
, cpu_addr
);
5003 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5004 tcg_gen_ext_i32_tl(t
, cpu_src1_32
);
5005 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5008 case 0x25: /* stfsr, V9 stxfsr */
5010 TCGv t
= get_temp_tl(dc
);
5012 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUSPARCState
, fsr
));
5013 #ifdef TARGET_SPARC64
5014 gen_address_mask(dc
, cpu_addr
);
5016 tcg_gen_qemu_st64(t
, cpu_addr
, dc
->mem_idx
);
5020 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5024 #ifdef TARGET_SPARC64
5025 /* V9 stqf, store quad fpreg */
5029 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5030 gen_op_load_fpr_QT0(QFPREG(rd
));
5031 r_const
= tcg_const_i32(dc
->mem_idx
);
5032 gen_address_mask(dc
, cpu_addr
);
5033 gen_helper_stqf(cpu_env
, cpu_addr
, r_const
);
5034 tcg_temp_free_i32(r_const
);
5037 #else /* !TARGET_SPARC64 */
5038 /* stdfq, store floating point queue */
5039 #if defined(CONFIG_USER_ONLY)
5042 if (!supervisor(dc
))
5044 if (gen_trap_ifnofpu(dc
)) {
5050 case 0x27: /* stdf, store double fpreg */
5051 gen_address_mask(dc
, cpu_addr
);
5052 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5053 tcg_gen_qemu_st64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
);
5058 } else if (xop
> 0x33 && xop
< 0x3f) {
5061 #ifdef TARGET_SPARC64
5062 case 0x34: /* V9 stfa */
5063 if (gen_trap_ifnofpu(dc
)) {
5066 gen_stf_asi(cpu_addr
, insn
, 4, rd
);
5068 case 0x36: /* V9 stqfa */
5072 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5073 if (gen_trap_ifnofpu(dc
)) {
5076 r_const
= tcg_const_i32(7);
5077 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
5078 tcg_temp_free_i32(r_const
);
5079 gen_stf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
5082 case 0x37: /* V9 stdfa */
5083 if (gen_trap_ifnofpu(dc
)) {
5086 gen_stf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
5088 case 0x3e: /* V9 casxa */
5089 rs2
= GET_FIELD(insn
, 27, 31);
5090 cpu_src2
= gen_load_gpr(dc
, rs2
);
5091 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5094 case 0x34: /* stc */
5095 case 0x35: /* stcsr */
5096 case 0x36: /* stdcq */
5097 case 0x37: /* stdc */
5100 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5101 case 0x3c: /* V9 or LEON3 casa */
5102 #ifndef TARGET_SPARC64
5103 CHECK_IU_FEATURE(dc
, CASA
);
5107 if (!supervisor(dc
)) {
5111 rs2
= GET_FIELD(insn
, 27, 31);
5112 cpu_src2
= gen_load_gpr(dc
, rs2
);
5113 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5125 /* default case for non jump instructions */
5126 if (dc
->npc
== DYNAMIC_PC
) {
5127 dc
->pc
= DYNAMIC_PC
;
5129 } else if (dc
->npc
== JUMP_PC
) {
5130 /* we can do a static jump */
5131 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
5135 dc
->npc
= dc
->npc
+ 4;
5144 r_const
= tcg_const_i32(TT_ILL_INSN
);
5145 gen_helper_raise_exception(cpu_env
, r_const
);
5146 tcg_temp_free_i32(r_const
);
5155 r_const
= tcg_const_i32(TT_UNIMP_FLUSH
);
5156 gen_helper_raise_exception(cpu_env
, r_const
);
5157 tcg_temp_free_i32(r_const
);
5161 #if !defined(CONFIG_USER_ONLY)
5167 r_const
= tcg_const_i32(TT_PRIV_INSN
);
5168 gen_helper_raise_exception(cpu_env
, r_const
);
5169 tcg_temp_free_i32(r_const
);
5176 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP
);
5179 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5182 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR
);
5186 #ifndef TARGET_SPARC64
5192 r_const
= tcg_const_i32(TT_NCP_INSN
);
5193 gen_helper_raise_exception(cpu_env
, r_const
);
5194 tcg_temp_free(r_const
);
5200 if (dc
->n_t32
!= 0) {
5202 for (i
= dc
->n_t32
- 1; i
>= 0; --i
) {
5203 tcg_temp_free_i32(dc
->t32
[i
]);
5207 if (dc
->n_ttl
!= 0) {
5209 for (i
= dc
->n_ttl
- 1; i
>= 0; --i
) {
5210 tcg_temp_free(dc
->ttl
[i
]);
/*
 * NOTE(review): this extracted text is missing many original lines
 * (variable declarations, braces, the translation-loop header, several
 * #ifdef bodies).  The code below is kept byte-identical; only comments
 * have been added.  Verify every hedged note against the full source.
 *
 * Core translation loop: fetches guest instructions starting at the
 * TB's pc, decodes each one via disas_sparc_insn(), and finishes the
 * TB with either direct chaining (static pc/npc) or a stored dynamic
 * pc.  The second mode visible below (the gen_opc_* bookkeeping)
 * presumably implements "search PC" for restore_state_to_opc() --
 * TODO confirm; the selecting parameter is not visible here.
 */
5216 static inline void gen_intermediate_code_internal(SPARCCPU
*cpu
,
5217 TranslationBlock
*tb
,
/* Two views of the same CPU object: generic CPUState and SPARC env. */
5220 CPUState
*cs
= CPU(cpu
);
5221 CPUSPARCState
*env
= &cpu
->env
;
5222 target_ulong pc_start
, last_pc
;
/* Per-TB disassembly context lives on the stack and is zeroed below. */
5223 DisasContext dc1
, *dc
= &dc1
;
5230 memset(dc
, 0, sizeof(DisasContext
));
/* Initial npc comes from cs_base; cc_op starts fully dynamic. */
5235 dc
->npc
= (target_ulong
) tb
->cs_base
;
5236 dc
->cc_op
= CC_OP_DYNAMIC
;
5237 dc
->mem_idx
= cpu_mmu_index(env
, false);
/* FPU-enable and 32-bit address masking are encoded in tb->flags. */
5239 dc
->fpu_enabled
= tb_fpu_enabled(tb
->flags
);
5240 dc
->address_mask_32bit
= tb_am_enabled(tb
->flags
);
5241 dc
->singlestep
= (cs
->singlestep_enabled
|| singlestep
);
/* Instruction budget from cflags; 0 means "no limit" (use the mask). */
5244 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5246 max_insns
= CF_COUNT_MASK
;
/* Emit a debug trap when a breakpoint matches the current pc.
   NOTE(review): the lines handling the bp->pc != pc_start case are
   missing from this extract. */
5249 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
5250 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
5251 if (bp
->pc
== dc
->pc
) {
5252 if (dc
->pc
!= pc_start
)
5254 gen_helper_debug(cpu_env
);
/* Search-PC bookkeeping: record pc/npc/icount per TCG op index so the
   state can be reconstructed in restore_state_to_opc(). */
5262 qemu_log("Search PC...\n");
5263 j
= tcg_op_buf_count();
5267 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5268 tcg_ctx
.gen_opc_pc
[lj
] = dc
->pc
;
5269 gen_opc_npc
[lj
] = dc
->npc
;
5270 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
5271 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
/* Last instruction of an icount TB may do I/O; original presumably
   called gen_io_start() here -- line missing from extract. */
5274 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
/* Fetch and decode one guest instruction. */
5277 insn
= cpu_ldl_code(env
, dc
->pc
);
5279 disas_sparc_insn(dc
, insn
);
5284 /* if the next PC is different, we abort now */
5285 if (dc
->pc
!= (last_pc
+ 4))
5287 /* if we reach a page boundary, we stop generation so that the
5288 PC of a TT_TFAULT exception is always in the right page */
5289 if ((dc
->pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
5291 /* if single step mode, we generate only one instruction and
5292 generate an exception */
5293 if (dc
->singlestep
) {
/* Loop termination: op buffer full, page-size budget spent, or
   instruction budget reached. */
5296 } while (!tcg_op_buf_full() &&
5297 (dc
->pc
- pc_start
) < (TARGET_PAGE_SIZE
- 32) &&
5298 num_insns
< max_insns
);
5301 if (tb
->cflags
& CF_LAST_IO
) {
/* Both pc and npc statically known: chain directly to the next TB. */
5305 if (dc
->pc
!= DYNAMIC_PC
&&
5306 (dc
->npc
!= DYNAMIC_PC
&& dc
->npc
!= JUMP_PC
)) {
5307 /* static PC and NPC: we can use direct chaining */
5308 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
/* Otherwise materialise whatever is statically known into cpu_pc. */
5310 if (dc
->pc
!= DYNAMIC_PC
) {
5311 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5317 gen_tb_end(tb
, num_insns
);
/* Search-PC epilogue: pad the instr_start array and stash the two
   possible jump targets for restore_state_to_opc(). */
5320 j
= tcg_op_buf_count();
5323 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5327 gen_opc_jump_pc
[0] = dc
->jump_pc
[0];
5328 gen_opc_jump_pc
[1] = dc
->jump_pc
[1];
/* Record translated size and instruction count on the TB. */
5330 tb
->size
= last_pc
+ 4 - pc_start
;
5331 tb
->icount
= num_insns
;
/* Optional disassembly dump of the guest code that was translated. */
5334 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5335 qemu_log("--------------\n");
5336 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5337 log_target_disas(cs
, pc_start
, last_pc
+ 4 - pc_start
, 0);
5343 void gen_intermediate_code(CPUSPARCState
* env
, TranslationBlock
* tb
)
5345 gen_intermediate_code_internal(sparc_env_get_cpu(env
), tb
, false);
5348 void gen_intermediate_code_pc(CPUSPARCState
* env
, TranslationBlock
* tb
)
5350 gen_intermediate_code_internal(sparc_env_get_cpu(env
), tb
, true);
/*
 * NOTE(review): this extracted text is missing many original lines
 * (register-name string arguments, several table entries, #endif and
 * closing braces).  Code below is byte-identical; only comments added.
 *
 * One-time registration of the TCG global variables that mirror SPARC
 * CPU state: the env base pointer, the register-window pointer, the
 * condition-code sources, psr/fsr/pc/npc/y, the global registers
 * g1..g7, and the double-precision FP register file cpu_fpr[].
 */
5353 void gen_intermediate_code_init(CPUSPARCState
*env
)
/* Debugger-visible names for the globals; g0 is hardwired to zero and
   never registered, hence the NULL slot. */
5357 static const char * const gregnames
[8] = {
5358 NULL
, // g0 not used
/* FP registers are registered as 32 64-bit (even-numbered) pairs. */
5367 static const char * const fregnames
[32] = {
5368 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5369 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5370 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5371 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5374 /* init various static tables */
/* env itself lives in a fixed host register (TCG_AREG0); everything
   else is a memory-backed global at an offset inside CPUSPARCState. */
5378 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
5379 cpu_regwptr
= tcg_global_mem_new_ptr(TCG_AREG0
,
5380 offsetof(CPUSPARCState
, regwptr
),
/* SPARC64-only state: xcc flags, ASI, FPRS, GSR, timer compare
   registers, hypervisor registers, and the soft-interrupt word. */
5382 #ifdef TARGET_SPARC64
5383 cpu_xcc
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, xcc
),
5385 cpu_asi
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, asi
),
5387 cpu_fprs
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, fprs
),
5389 cpu_gsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, gsr
),
5391 cpu_tick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5392 offsetof(CPUSPARCState
, tick_cmpr
),
5394 cpu_stick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5395 offsetof(CPUSPARCState
, stick_cmpr
),
5397 cpu_hstick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5398 offsetof(CPUSPARCState
, hstick_cmpr
),
5400 cpu_hintp
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, hintp
),
5402 cpu_htba
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, htba
),
5404 cpu_hver
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, hver
),
5406 cpu_ssr
= tcg_global_mem_new(TCG_AREG0
,
5407 offsetof(CPUSPARCState
, ssr
), "ssr");
5408 cpu_ver
= tcg_global_mem_new(TCG_AREG0
,
5409 offsetof(CPUSPARCState
, version
), "ver");
5410 cpu_softint
= tcg_global_mem_new_i32(TCG_AREG0
,
5411 offsetof(CPUSPARCState
, softint
),
/* SPARC32-only window-invalid mask (presumably the #else branch of
   the TARGET_SPARC64 conditional above -- #else line not visible). */
5414 cpu_wim
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, wim
),
/* State common to both variants: condition codes, psr, fsr, pc/npc, y. */
5417 cpu_cond
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cond
),
5419 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_src
),
5421 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
,
5422 offsetof(CPUSPARCState
, cc_src2
),
5424 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_dst
),
5426 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, cc_op
),
5428 cpu_psr
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, psr
),
5430 cpu_fsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, fsr
),
5432 cpu_pc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, pc
),
5434 cpu_npc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, npc
),
5436 cpu_y
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, y
), "y");
/* Trap base register only exists in system emulation. */
5437 #ifndef CONFIG_USER_ONLY
5438 cpu_tbr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, tbr
),
/* Register g1..g7 (index 0 == g0 is skipped) and the FP register
   pairs; name arguments come from the tables above (lines missing). */
5441 for (i
= 1; i
< 8; i
++) {
5442 cpu_gregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
5443 offsetof(CPUSPARCState
, gregs
[i
]),
5446 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5447 cpu_fpr
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
5448 offsetof(CPUSPARCState
, fpr
[i
]),
5454 void restore_state_to_opc(CPUSPARCState
*env
, TranslationBlock
*tb
, int pc_pos
)
5457 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
5458 npc
= gen_opc_npc
[pc_pos
];
5460 /* dynamic NPC: already stored */
5461 } else if (npc
== 2) {
5462 /* jump PC: use 'cond' and the jump targets of the translation */
5464 env
->npc
= gen_opc_jump_pc
[0];
5466 env
->npc
= gen_opc_jump_pc
[1];