/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
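
/* Worked example (editorial, illustrative only): for insn = 0x8410a00f
   ("or %g2, 0xf, %g2"), GET_FIELD(insn, 2, 6) = (insn >> 25) & 0x1f = 2,
   i.e. the rd field (%g2).  GET_FIELD counts bit positions from the MSB,
   so FROM=2/TO=6 selects the bits the manuals call 29..25, and
   GET_FIELD_SP(insn, 25, 29) extracts exactly the same field.  */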
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
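
/* Illustrative check (editorial): GET_FIELDs(insn, 19, 31) extracts the
   13-bit simm13 field and widens it via sign_extend(x, 13), which computes
   (x << 19) >> 19 with an arithmetic right shift: an all-ones immediate
   0x1fff becomes -1, while 0x0fff stays 0x0fff.  */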
#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
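
/* Note (editorial): each element of cpu_fpr[] packs two single-precision
   registers, with the even-numbered %f register in the high 32 bits and
   the odd one in the low 32 bits -- hence the shift by 32 above for even
   sources, and the direct reuse of the i64 low half for odd ones.  */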
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
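
/* Note (editorial): %g0 (reg 0) reads as constant zero and has no backing
   storage, while windowed registers (reg >= 8) live behind cpu_regwptr;
   e.g. gen_load_gpr(dc, 9) emits a load of %o1 from
   cpu_regwptr + 1 * sizeof(target_ulong).  Only %g1..%g7 are TCG globals.  */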
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
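
/* Note (editorial): tcg_gen_exit_tb((uintptr_t)tb + tb_num) returns the TB
   pointer with the jump-slot index encoded in its low bits, which lets the
   execution loop patch the direct jump emitted by tcg_gen_goto_tb(tb_num);
   exiting with 0 in the cross-page case requests no chaining at all.  */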
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
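
/* Worked example (editorial, illustrative): after "addcc" the lazy state is
   cc_dst = cc_src + cc_src2 (mod 2^32), so unsigned overflow occurred iff
   cc_dst < cc_src, which is exactly the TCG_COND_LTU setcond above; e.g.
   0xffffffff + 2 leaves cc_dst = 1 < cc_src, giving carry = 1.  */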
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
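
/* Worked example (editorial, illustrative): sparc32 UMUL/SMUL leave the
   high 32 bits of the 64-bit product in %y; e.g. umul of 0x80000000 by 2
   yields rd = 0 with %y = 1.  The 32-bit path gets both halves at once
   from mulu2/muls2; the 64-bit path uses the explicit shift above.  */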
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
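
/* Note (editorial): fcc_offset selects which fcc field of the FSR these
   helpers test.  An offset of 0 addresses fcc0 via FSR_FCC0_SHIFT and
   FSR_FCC1_SHIFT; on V9 the callers pass the additional shift needed to
   reach fcc1..fcc3, so every eval routine below serves all four fields.  */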
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
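
/* Worked example (editorial, illustrative): for "subcc %o0, %o1, %g0"
   followed by "bgu", dc->cc_op is CC_OP_SUB and cond 0xc (gtu) maps via
   subcc_cond[] straight to TCG_COND_GTU on (cc_src, cc_src2) -- no PSR is
   materialized.  Only CC_OP_FLAGS, or cond 7/15 (the overflow tests),
   falls back to gen_helper_compute_psr() and a boolean result.  */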
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif

#ifdef TARGET_SPARC64
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
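
    /* Worked example (editorial, illustrative, using the width-8 case
       below): imask = 7, shift = 3, omask = 0xff, so an input whose low
       bits are 2 gives index = (2 & 7) << 3 = 16, and
       (0x0103070f1f3f7fffULL >> 16) & 0xff = 0x3f -- the mask row that
       table encodes for byte lane 2.  */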
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;

    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;

    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;

    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
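
/* Worked example (editorial, illustrative): with GSR.align = 3 the code
   above computes shift = 24, so dst = (s1 << 24) | (s2 >> 40) -- the 8
   bytes starting 3 bytes into the s1:s2 pair, which is the faligndata
   definition.  The xori 63 / shri 1 split exists because GSR.align = 0
   would otherwise ask TCG for an undefined shift by 64.  */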
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
        break;
    case 1:                     /* CALL */
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);

            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;

            if (xop == 0x3a) {  /* generate trap */
                int cond = GET_FIELD(insn, 3, 6);
                TCGv_i32 trap;
                TCGLabel *l1 = NULL;
                int mask;

                if (cond == 0) {
                    /* Trap never.  */
                    break;
                }

                save_state(dc);

                if (cond != 8) {
                    /* Conditional trap.  */
                    DisasCompare cmp;
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    if (cc == 0) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    } else {
                        goto illegal_insn;
                    }
#else
                    gen_compare(&cmp, 0, cond, dc);
#endif
                    l1 = gen_new_label();
                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                                      cmp.c1, cmp.c2, l1);
                    free_compare(&cmp);
                }

                mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                        ? UA2005_HTRAP_MASK : V8_TRAP_MASK);

                /* Don't use the normal temporaries, as they may well have
                   gone out of scope with the branch above.  While we're
                   doing that we might as well pre-truncate to 32-bit.  */
                trap = tcg_temp_new_i32();

                rs1 = GET_FIELD_SP(insn, 14, 18);
                if (IS_IMM) {
                    rs2 = GET_FIELD_SP(insn, 0, 6);
                    if (rs1 == 0) {
                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                        /* Signal that the trap value is fully constant.  */
                        mask = 0;
                    } else {
                        TCGv t1 = gen_load_gpr(dc, rs1);
                        tcg_gen_trunc_tl_i32(trap, t1);
                        tcg_gen_addi_i32(trap, trap, rs2);
                    }
                } else {
                    TCGv t1, t2;
                    rs2 = GET_FIELD_SP(insn, 0, 4);
                    t1 = gen_load_gpr(dc, rs1);
                    t2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(t1, t1, t2);
                    tcg_gen_trunc_tl_i32(trap, t1);
                }
                if (mask != 0) {
                    tcg_gen_andi_i32(trap, trap, mask);
                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);

                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->is_br = 1;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    /* Read Asr17 */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
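                /* Note: %tick/%stick reads cannot be simple register moves,
                   since the counters advance with time; the helper derives
                   the current count from the CPUTimer state loaded from env.
                   The mem_idx constant passed along appears to tell the
                   helper the current privilege context for the
                   non-privileged-access check. */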
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_tmp0, cpu_env,
                                                  r_tickptr, r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                save_state(dc);
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
                break;
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

                switch (xop) {
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    break;
                case 0x42: /* faddd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    break;
                case 0x43: /* faddq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    break;
                case 0x45: /* fsubs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    break;
                case 0x46: /* fsubd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    break;
                case 0x47: /* fsubq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    break;
                case 0x49: /* fmuls */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    break;
                case 0x4a: /* fmuld */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    break;
                case 0x4b: /* fmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    break;
                case 0x4d: /* fdivs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    break;
                case 0x4e: /* fdivd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    break;
                case 0x4f: /* fdivq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    break;
                case 0x69: /* fsmuld */
                    CHECK_FPU_FEATURE(dc, FSMULD);
                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    break;
                case 0x6e: /* fdmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    break;
                case 0xc4: /* fitos */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                    break;
                case 0xc6: /* fdtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    break;
                case 0xc7: /* fqtos */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    break;
                case 0xc8: /* fitod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    break;
                case 0xc9: /* fstod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    break;
                case 0xcb: /* fqtod */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    break;
                case 0xcc: /* fitoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    break;
                case 0xcd: /* fstoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    break;
                case 0xce: /* fdtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    break;
                case 0xd1: /* fstoi */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                    break;
                case 0xd2: /* fdtoi */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    break;
                case 0xd3: /* fqtoi */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 fmovd */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x3: /* V9 fmovq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_move_Q(rd, rs2);
                    break;
                case 0x6: /* V9 fnegd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                    break;
                case 0x7: /* V9 fnegq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    break;
                case 0xa: /* V9 fabsd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                    break;
                case 0xb: /* V9 fabsq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    break;
                case 0x81: /* V9 fstox */
                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    break;
                case 0x82: /* V9 fdtox */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    break;
                case 0x83: /* V9 fqtox */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    break;
                case 0x84: /* V9 fxtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    break;
                case 0x88: /* V9 fxtod */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    break;
                case 0x8c: /* V9 fxtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
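                /* Note on the gen_fop_* naming above: the letter suffixes
                   encode operand sizes -- F = 32-bit single, D = 64-bit
                   double, Q = 128-bit quad -- so gen_fop_DDD() emits a
                   double-precision helper call with two double sources and
                   gen_fop_FD() converts a double into a single.  The
                   gen_ne_fop_* variants appear to be for operations that
                   cannot raise IEEE exceptions (moves, negate, abs and the
                   widening conversions) and so skip the exception check. */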
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                          \
                do {                                       \
                    DisasCompare cmp;                      \
                    cond = GET_FIELD_SP(insn, 10, 12);     \
                    cpu_src1 = get_src1(dc, insn);         \
                    gen_compare_reg(&cmp, cond, cpu_src1); \
                    gen_fmov##sz(dc, &cmp, rd, rs2);       \
                    free_compare(&cmp);                    \
                } while (0)

                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                    FMOVR(s);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    FMOVR(d);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVR(q);
                    break;
                }
#undef FMOVR
#endif
                switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                    \
                do {                                       \
                    DisasCompare cmp;                      \
                    cond = GET_FIELD_SP(insn, 14, 17);     \
                    gen_fcompare(&cmp, fcc, cond);         \
                    gen_fmov##sz(dc, &cmp, rd, rs2);       \
                    free_compare(&cmp);                    \
                } while (0)

                case 0x001: /* V9 fmovscc %fcc0 */
                    FMOVCC(0, s);
                    break;
                case 0x002: /* V9 fmovdcc %fcc0 */
                    FMOVCC(0, d);
                    break;
                case 0x003: /* V9 fmovqcc %fcc0 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x041: /* V9 fmovscc %fcc1 */
                    FMOVCC(1, s);
                    break;
                case 0x042: /* V9 fmovdcc %fcc1 */
                    FMOVCC(1, d);
                    break;
                case 0x043: /* V9 fmovqcc %fcc1 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
                case 0x081: /* V9 fmovscc %fcc2 */
                    FMOVCC(2, s);
                    break;
                case 0x082: /* V9 fmovdcc %fcc2 */
                    FMOVCC(2, d);
                    break;
                case 0x083: /* V9 fmovqcc %fcc2 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(2, q);
                    break;
                case 0x0c1: /* V9 fmovscc %fcc3 */
                    FMOVCC(3, s);
                    break;
                case 0x0c2: /* V9 fmovdcc %fcc3 */
                    FMOVCC(3, d);
                    break;
                case 0x0c3: /* V9 fmovqcc %fcc3 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(3, q);
                    break;
#undef FMOVCC
#define FMOVCC(xcc, sz)                                    \
                do {                                       \
                    DisasCompare cmp;                      \
                    cond = GET_FIELD_SP(insn, 14, 17);     \
                    gen_compare(&cmp, xcc, cond, dc);      \
                    gen_fmov##sz(dc, &cmp, rd, rs2);       \
                    free_compare(&cmp);                    \
                } while (0)

                case 0x101: /* V9 fmovscc %icc */
                    FMOVCC(0, s);
                    break;
                case 0x102: /* V9 fmovdcc %icc */
                    FMOVCC(0, d);
                    break;
                case 0x103: /* V9 fmovqcc %icc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x181: /* V9 fmovscc %xcc */
                    FMOVCC(1, s);
                    break;
                case 0x182: /* V9 fmovdcc %xcc */
                    FMOVCC(1, d);
                    break;
                case 0x183: /* V9 fmovqcc %xcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
#undef FMOVCC
#endif
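                /* Illustration: with the first definition above,
                   FMOVCC(0, s) expands roughly to
                       cond = GET_FIELD_SP(insn, 14, 17);
                       gen_fcompare(&cmp, 0, cond);
                       gen_fmovs(dc, &cmp, rd, rs2);
                       free_compare(&cmp);
                   i.e. an fmovscc conditional on %fcc0; the second
                   definition swaps in gen_compare() for the integer
                   condition codes (%icc/%xcc). */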
                case 0x51: /* fcmps, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x52: /* fcmpd, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x53: /* fcmpq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpq(rd & 3);
                    break;
                case 0x55: /* fcmpes, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x56: /* fcmped, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x57: /* fcmpeq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpeq(rd & 3);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x2) {
                TCGv dst = gen_dest_gpr(dc, rd);
                rs1 = GET_FIELD(insn, 13, 17);
                if (rs1 == 0) {
                    /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_movi_tl(dst, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            tcg_gen_movi_tl(dst, 0);
                            gen_store_gpr(dc, rd, dst);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            gen_store_gpr(dc, rd, cpu_src2);
                        }
                    }
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_ori_tl(dst, cpu_src1, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            /* mov shortcut:  or x, %g0, y -> mov x, y */
                            gen_store_gpr(dc, rd, cpu_src1);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                            gen_store_gpr(dc, rd, dst);
                        }
                    }
                }
#ifdef TARGET_SPARC64
            } else if (xop == 0x25) { /* sll, V9 sllx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
                    }
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    }
                    tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x26) { /* srl, V9 srlx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x27) { /* sra, V9 srax */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
#endif
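            /* Note: for the V9 shift encodings, bit 12 of the instruction
               selects the 64-bit form (sllx/srlx/srax, count masked to
               0x3f); with bit 12 clear the legacy 32-bit semantics apply:
               the count is masked to 0x1f and the source is zero-extended
               (srl) or sign-extended (sra) from 32 bits first. */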
            } else if (xop < 0x36) {
                if (xop < 0x20) {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop & ~0x10) {
                    case 0x0: /* add */
                        if (xop & 0x10) {
                            gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                            dc->cc_op = CC_OP_ADD;
                        } else {
                            tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x1: /* and */
                        tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x2: /* or */
                        tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x3: /* xor */
                        tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x4: /* sub */
                        if (xop & 0x10) {
                            gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                            dc->cc_op = CC_OP_SUB;
                        } else {
                            tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x5: /* andn */
                        tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x6: /* orn */
                        tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x7: /* xorn */
                        tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x8: /* addx, V9 addc */
                        gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0x9: /* V9 mulx */
                        tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xa: /* umul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xb: /* smul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xc: /* subx, V9 subc */
                        gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0xd: /* V9 udivx */
                        gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xe: /* udiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    case 0xf: /* sdiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    default:
                        goto illegal_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
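                    /* Note: condition codes are evaluated lazily.  The *cc
                       forms above only record their operands (cpu_cc_src/
                       cpu_cc_src2/cpu_cc_dst) and the operation kind in
                       cpu_cc_op and dc->cc_op; the actual PSR/CCR flag bits
                       are materialized later, only when something reads
                       them. */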
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop) {
                    case 0x20: /* taddcc */
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                        dc->cc_op = CC_OP_TADD;
                        break;
                    case 0x21: /* tsubcc */
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                        dc->cc_op = CC_OP_TSUB;
                        break;
                    case 0x22: /* taddcctv */
                        gen_helper_taddcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TADDTV;
                        break;
                    case 0x23: /* tsubcctv */
                        gen_helper_tsubcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TSUBTV;
                        break;
                    case 0x24: /* mulscc */
                        update_psr(dc);
                        gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
                        break;
#ifndef TARGET_SPARC64
                    case 0x25: /* sll */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x26: /* srl */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x27: /* sra */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
#endif
                    case 0x30:
                        {
                            cpu_tmp0 = get_temp_tl(dc);
                            switch (rd) {
                            case 0: /* wry */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
                                break;
#ifndef TARGET_SPARC64
                            case 0x01 ... 0x0f: /* undefined in the
                                                   SPARCv8 manual, nop
                                                   on the microSPARC II */
                            case 0x10 ... 0x1f: /* implementation-dependent
                                                   in the SPARCv8 manual,
                                                   nop on the microSPARC II */
                                if ((rd == 0x13) && (dc->def->features &
                                                     CPU_FEATURE_POWERDOWN)) {
                                    /* LEON3 power-down */
                                    save_state(dc);
                                    gen_helper_power_down(cpu_env);
                                }
                                break;
#else
                            case 0x2: /* V9 wrccr */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_wrccr(cpu_env, cpu_tmp0);
                                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                                dc->cc_op = CC_OP_FLAGS;
                                break;
                            case 0x3: /* V9 wrasi */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                                tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
                                break;
                            case 0x6: /* V9 wrfprs */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(0);
                                dc->is_br = 1;
                                break;
                            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                                if (supervisor(dc)) {
                                    ; // XXX
                                }
#endif
                                break;
                            case 0x13: /* Graphics Status */
                                if (gen_trap_ifnofpu(dc)) {
                                    goto jmp_insn;
                                }
                                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
                                break;
                            case 0x14: /* Softint set */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_set_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x15: /* Softint clear */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_clear_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x16: /* Softint write */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_write_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_tick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_stick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x10: /* Performance Control */
                            case 0x11: /* Performance Instrumentation
                                          Counter */
                            case 0x12: /* Dispatch Control */
#endif
                            default:
                                goto illegal_insn;
                            }
                        }
                        break;
#if !defined(CONFIG_USER_ONLY)
                    case 0x31: /* wrpsr, V9 saved, restored */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0:
                                gen_helper_saved(cpu_env);
                                break;
                            case 1:
                                gen_helper_restored(cpu_env);
                                break;
                            case 2: /* UA2005 allclean */
                            case 3: /* UA2005 otherw */
                            case 4: /* UA2005 normalw */
                            case 5: /* UA2005 invalw */
                                // XXX
                            default:
                                goto illegal_insn;
                            }
#else
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            gen_helper_wrpsr(cpu_env, cpu_tmp0);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                            dc->cc_op = CC_OP_FLAGS;
                            save_state(dc);
                            gen_op_next_insn();
                            tcg_gen_exit_tb(0);
                            dc->is_br = 1;
#endif
                        }
                        break;
                    case 0x32: /* wrwim, V9 wrpr */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0: // tpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 1: // tnpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tnpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 2: // tstate
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state,
                                                           tstate));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 3: // tt
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                                    offsetof(trap_state, tt));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 4: // tick
                                {
                                    TCGv_ptr r_tickptr;

                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 5: // tba
                                tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                                break;
                            case 6: // pstate
                                save_state(dc);
                                gen_helper_wrpstate(cpu_env, cpu_tmp0);
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 7: // tl
                                save_state(dc);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, tl));
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 8: // pil
                                gen_helper_wrpil(cpu_env, cpu_tmp0);
                                break;
                            case 9: // cwp
                                gen_helper_wrcwp(cpu_env, cpu_tmp0);
                                break;
                            case 10: // cansave
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cansave));
                                break;
                            case 11: // canrestore
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         canrestore));
                                break;
                            case 12: // cleanwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cleanwin));
                                break;
                            case 13: // otherwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         otherwin));
                                break;
                            case 14: // wstate
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         wstate));
                                break;
                            case 16: // UA2005 gl
                                CHECK_IU_FEATURE(dc, GL);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, gl));
                                break;
                            case 26: // UA2005 strand status
                                CHECK_IU_FEATURE(dc, HYPV);
                                if (!hypervisor(dc))
                                    goto priv_insn;
                                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                                break;
                            default:
                                goto illegal_insn;
                            }
#else
                            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                            if (dc->def->nwindows != 32) {
                                tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                                (1 << dc->def->nwindows) - 1);
                            }
#endif
                        }
                        break;
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                            CHECK_IU_FEATURE(dc, HYPV);
                            if (!hypervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            switch (rd) {
                            case 0: // hpstate
                                // XXX gen_op_wrhpstate();
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(0);
                                dc->is_br = 1;
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                                break;
                            case 5: // htba
                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                                break;
                            case 31: // hstick_cmpr
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, hstick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_hstick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#endif
#ifdef TARGET_SPARC64
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            DisasCompare cmp;
                            TCGv dst;

                            if (insn & (1 << 18)) {
                                if (cc == 0) {
                                    gen_compare(&cmp, 0, cond, dc);
                                } else if (cc == 2) {
                                    gen_compare(&cmp, 1, cond, dc);
                                } else {
                                    goto illegal_insn;
                                }
                            } else {
                                gen_fcompare(&cmp, cc, cond);
                            }

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 11-bit field we have
                               in movcc.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 10);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
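                        /* Note: movcc maps naturally onto
                           tcg_gen_movcond_tl(): the destination keeps its
                           old value when the condition is false, which is
                           why the current rd value is loaded first and
                           passed as the "else" operand. */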
                    case 0x2d: /* V9 sdivx */
                        gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2e: /* V9 popc */
                        gen_helper_popc(cpu_dst, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            DisasCompare cmp;
                            TCGv dst;

                            gen_compare_reg(&cmp, cond, cpu_src1);

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 10-bit field we have
                               in movr.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 9);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
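                /* Note on the gen_edge() call sites above: assuming the
                   usual gen_edge(dc, dst, src1, src2, width, cc, left)
                   ordering, the trailing arguments are the element width in
                   bits (8/16/32), whether condition codes are set (the *cc
                   forms), and whether the little-endian variant (edge8l...)
                   is selected. */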
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
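                /* Note: the VIS "logical" group above (fzero/fone, fsrc,
                   fnot, fand/for/fxor and friends) needs no helpers -- each
                   op maps directly onto a plain TCG integer op on the 64-bit
                   (or 32-bit, for the ...s forms) FP registers, while the
                   packed-arithmetic and pixel ops go through helpers. */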
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                TCGv_i32 r_const;

                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_helper_restore(cpu_env);
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
                case 0x38:  /* jmpl */
                    {
                        TCGv t;
                        TCGv_i32 r_const;

                        t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);

                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39:  /* rett, V9 return */
                    {
                        TCGv_i32 r_const;

                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                        goto unimp_flush;
                    /* nop */
                    break;
                case 0x3c:      /* save */
                    save_state(dc);
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d:      /* restore */
                    save_state(dc);
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e:      /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
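        /* Note: the control transfers above follow the SPARC delay-slot
           discipline: gen_mov_pc_npc() moves the (possibly dynamic) npc
           into pc and the computed target goes into npc, so the delay-slot
           instruction at the old npc still executes before the jump takes
           effect. */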
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
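                /* Note: ldd fills an even/odd register pair from one 64-bit
                   load: the low 32 bits of the loaded value go to rd + 1
                   (stored above), then the value is shifted right by 32 so
                   the high word ends up in rd via the common
                   gen_store_gpr(dc, rd, cpu_val) at the end of the load
                   path. */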
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:       /* ldstub -- XXX: should be atomically */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:      /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory. Also
                                   atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;
                        TCGv lo;

                        save_state(dc);
                        gen_address_mask(dc, cpu_addr);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        lo = gen_load_gpr(dc, rd + 1);

                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 4);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 1);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 2);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 8);
                    dc->npc = DYNAMIC_PC;
                    break;
#endif
                default:
                    goto illegal_insn;
                }
4995 } else if (xop
> 0x23 && xop
< 0x28) {
4996 if (gen_trap_ifnofpu(dc
)) {
        case 0x24: /* stf, store fpreg */
            {
                TCGv t = get_temp_tl(dc);
                gen_address_mask(dc, cpu_addr);
                cpu_src1_32 = gen_load_fpr_F(dc, rd);
                tcg_gen_ext_i32_tl(t, cpu_src1_32);
                tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
            }
            break;
        case 0x25: /* stfsr, V9 stxfsr */
            {
                TCGv t = get_temp_tl(dc);

                tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                gen_address_mask(dc, cpu_addr);
                if (rd == 1) {
                    tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                    break;
                }
#endif
                tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
            }
            break;
        case 0x26:
#ifdef TARGET_SPARC64
            /* V9 stqf, store quad fpreg */
            {
                TCGv_i32 r_const;

                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rd));
                r_const = tcg_const_i32(dc->mem_idx);
                gen_address_mask(dc, cpu_addr);
                gen_helper_stqf(cpu_env, cpu_addr, r_const);
                tcg_temp_free_i32(r_const);
            }
            break;
#else /* !TARGET_SPARC64 */
            /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
            goto illegal_insn;
#else
            if (!supervisor(dc))
                goto priv_insn;
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            goto nfq_insn;
#endif
#endif
        case 0x27: /* stdf, store double fpreg */
            gen_address_mask(dc, cpu_addr);
            cpu_src1_64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
            break;
        default:
            goto illegal_insn;
        }
    } else if (xop > 0x33 && xop < 0x3f) {
        switch (xop) {
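        /* xop 0x34-0x3e: V9 alternate-space FP stores and the
           compare-and-swap forms (casa/casxa); on pre-V9 CPUs 0x34-0x37
           are the coprocessor stores instead. */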
#ifdef TARGET_SPARC64
        case 0x34: /* V9 stfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(cpu_addr, insn, 4, rd);
            break;
        case 0x36: /* V9 stqfa */
            {
                TCGv_i32 r_const;

                CHECK_FPU_FEATURE(dc, FLOAT128);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                r_const = tcg_const_i32(7);
                gen_helper_check_align(cpu_env, cpu_addr, r_const);
                tcg_temp_free_i32(r_const);
                gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
            }
            break;
        case 0x37: /* V9 stdfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
            break;
        case 0x3e: /* V9 casxa */
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
#else
        case 0x34: /* stc */
        case 0x35: /* stcsr */
        case 0x36: /* stdcq */
        case 0x37: /* stdc */
            goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
            CHECK_IU_FEATURE(dc, CASA);
            if (IS_IMM) {
                goto illegal_insn;
            }
            /* LEON3 allows CASA from user space with ASI 0xa */
            if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
                goto priv_insn;
            }
#endif
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
#endif
        default:
            goto illegal_insn;
        }
    } else {
        goto illegal_insn;
    }
    break;
}
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
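    /* Exception exits: each label below raises the matching trap and
       marks the TB as finished via dc->is_br before falling through
       to the common egress path. */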
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
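    /* Common exit: free the per-insn i32/tl temporaries handed out by
       get_temp_i32() and get_temp_tl(). */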
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
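
/* Translate one guest TB into TCG ops, stopping at branches, page
   boundaries, single-step, or when the op buffer fills up. */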
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    unsigned int insn;
    int num_insns;
    int max_insns;
    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env, false);
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
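        /* Record pc/npc for this insn; when npc is the symbolic
           JUMP_PC, the taken target goes in the second word with the
           JUMP_PC tag so restore_state_to_opc() can rebuild it. */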
        if (dc->npc & JUMP_PC) {
            assert(dc->jump_pc[1] == dc->pc + 4);
            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
        } else {
            tcg_gen_insn_start(dc->pc, dc->npc);
        }
        num_insns++;
        last_pc = dc->pc;
        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
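
/* Allocate the fixed TCG globals that back the SPARC register state;
   guarded so the allocation runs only once. */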
void gen_intermediate_code_init(CPUSPARCState *env)
{
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1", "g2", "g3", "g4", "g5", "g6", "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    unsigned int i;
    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(cpu_env,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(cpu_env,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(cpu_env,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSPARCState, psr),
                                         "psr");
        cpu_fsr = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(cpu_env,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }
    }
}
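
/* Called when an exception unwinds a TB: data[] holds the pc/npc pair
   recorded by tcg_gen_insn_start() in the translation loop above. */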
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}