/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
28 #include "disas/disas.h"
29 #include "exec/helper-proto.h"
31 #include "exec/cpu_ldst.h"
33 #include "exec/helper-gen.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
static TCGv cpu_cond;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];
#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))
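/* Worked example (illustrative): the rd field of a SPARC instruction
   occupies bits 29..25 in the manuals' numbering, so
       GET_FIELD(insn, 2, 6)      == (insn >> 25) & 0x1f
       GET_FIELD_SP(insn, 25, 29) == GET_FIELD(insn, 2, 6)
   GET_FIELD counts from the MSB (bit 0 == 2^31), while GET_FIELD_SP
   counts from the LSB as the architecture manuals do.  */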
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
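/* Worked example: V9 recycles bit 0 of the register field as bit 5, so an
   rd encoding of 1 names %f32 (DFPREG(1) == 32) while DFPREG(2) == 2
   (%f2); the pre-V9 variants simply mask off the low bit(s).  */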
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
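/* E.g. for a 22-bit branch displacement: sign_extend(0x3fffff, 22) == -1,
   while sign_extend(0x1fffff, 22) == 0x1fffff (sign bit clear).  */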
#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
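/* Note: temporaries handed out by get_temp_i32/get_temp_tl are tracked in
   dc->t32[]/dc->ttl[] and released in bulk once the current instruction
   finishes translating, so callers do not pair them with tcg_temp_free.  */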
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
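/* Layout note: two single-precision registers share one element of
   cpu_fpr[], with the even-numbered register in the high 32 bits; that is
   why the even case above extracts bits 63:32 with a shift.  */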
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
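/* The trick above: after dst = src1 + src2 (mod 2^32), a carry-out occurred
   iff dst < src1 unsigned; e.g. 0xffffffff + 1 wraps to 0, and 0 < 0xffffffff.  */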
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}
// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}
// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}
// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}
// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
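/* Orientation note (per the V9 FSR layout): fcc_offset selects one of the
   four condition-code fields relative to fcc0 -- 0 for fcc0 (FSR bits
   11:10), and 22/24/26 for fcc1..fcc3 (FSR bits 33:32, 35:34, 37:36); see
   the offset switch in gen_fcompare below.  */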
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}
// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
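/* cpu_cond was materialized by the conditional branch that set npc to
   JUMP_PC, so the movcond above resolves the real next-pc to jump_pc[0]
   (condition true) or jump_pc[1] (condition false) only when it is
   finally needed.  */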
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}
static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
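/* Example of what the lazy-flags paths above buy: for a "subcc ; bgu"
   pair, dc->cc_op is CC_OP_SUB, so the branch compiles down to one
   unsigned compare (TCG_COND_GTU) of the saved subcc operands instead of
   a helper call that materializes the whole PSR.  */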
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}
static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                tcg_gen_movi_tl(cpu_npc, dc->jump_pc[1]);
            }
            dc->npc = JUMP_PC;
        }
    }
}
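/* Annul-bit recap of the cases above: a=1 on an untaken "bn,a" skips the
   delay slot entirely (pc = npc + 4), while a=1 on "ba,a" is still taken
   but likewise annuls the slot (pc = target).  Conditional annulled
   branches execute the slot only when taken, hence gen_branch_a.  */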
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                tcg_gen_movi_tl(cpu_npc, dc->jump_pc[1]);
            }
            dc->npc = JUMP_PC;
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            tcg_gen_movi_tl(cpu_npc, dc->jump_pc[1]);
        }
        dc->npc = JUMP_PC;
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}
static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
#else
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}
static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}
static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}
static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
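/* Typical decoder usage (illustrative): an FADDS is wired up as
       gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
   so the fpu helper receives cpu_env plus both single-precision inputs
   and the result lands back in the destination register.  */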
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}
static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
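/* Worked example (left-edge tables above, illustrative): for an edge8
   input whose low bits are 5, the index is (5 & 0x7) << 3 == 40 and
   (0x80c0e0f0f8fcfeffULL >> 40) & 0xff == 0xe0, i.e. the mask covering
   the bytes from offset 5 up to the end of the 8-byte block.  */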
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
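/* E.g. with GSR.align == 3 this computes dst = (s1 << 24) | (s2 >> 40),
   the usual (s1 << 8n) | (s2 >> (64 - 8n)) byte funnel; the xor-with-63
   plus the extra shift-by-1 implements the 64-bit right shift without
   ever issuing a TCG shift by 64.  */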
#endif

#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);
    switch (opc) {
    case 0: /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1: /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3: /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5: /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7: /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2: /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6: /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4: /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0: /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
2603 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2604 TCGv o7
= gen_dest_gpr(dc
, 15);
2606 tcg_gen_movi_tl(o7
, dc
->pc
);
2607 gen_store_gpr(dc
, 15, o7
);
2610 #ifdef TARGET_SPARC64
2611 if (unlikely(AM_CHECK(dc
))) {
2612 target
&= 0xffffffffULL
;
2618 case 2: /* FPU & Logical Operations */
2620 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2621 TCGv cpu_dst
= get_temp_tl(dc
);
2624 if (xop
== 0x3a) { /* generate trap */
2625 int cond
= GET_FIELD(insn
, 3, 6);
2637 /* Conditional trap. */
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    if (cc == 0) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    } else {
                        goto illegal_insn;
                    }
#else
                    gen_compare(&cmp, 0, cond, dc);
#endif
                    l1 = gen_new_label();
                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                                      cmp.c1, cmp.c2, l1);
                    free_compare(&cmp);
                }
2658 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2659 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2661 /* Don't use the normal temporaries, as they may well have
2662 gone out of scope with the branch above. While we're
2663 doing that we might as well pre-truncate to 32-bit. */
2664 trap
= tcg_temp_new_i32();
2666 rs1
= GET_FIELD_SP(insn
, 14, 18);
2668 rs2
= GET_FIELD_SP(insn
, 0, 6);
2670 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2671 /* Signal that the trap value is fully constant. */
2674 TCGv t1
= gen_load_gpr(dc
, rs1
);
2675 tcg_gen_trunc_tl_i32(trap
, t1
);
2676 tcg_gen_addi_i32(trap
, trap
, rs2
);
2680 rs2
= GET_FIELD_SP(insn
, 0, 4);
2681 t1
= gen_load_gpr(dc
, rs1
);
2682 t2
= gen_load_gpr(dc
, rs2
);
2683 tcg_gen_add_tl(t1
, t1
, t2
);
2684 tcg_gen_trunc_tl_i32(trap
, t1
);
2687 tcg_gen_andi_i32(trap
, trap
, mask
);
2688 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2691 gen_helper_raise_exception(cpu_env
, trap
);
2692 tcg_temp_free_i32(trap
);
2695 /* An unconditional trap ends the TB. */
2699 /* A conditional trap falls through to the next insn. */
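                /*
                 * Codegen pattern used above, roughly: for a Tcc with a
                 * real condition, emit a branch over the trap using the
                 * inverted condition, i.e.
                 *     brcond(!cond) -> l1;  raise_exception(trap);  l1:
                 * so a conditional trap costs one conditional branch on
                 * the non-trapping fast path.
                 */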
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
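                /*
                 * Note on the tick reads above: the timer lives behind a
                 * pointer in CPUSPARCState, so the generated code first
                 * loads env->tick (or env->stick) into a TCG pointer temp
                 * and then calls the tick_get_count helper on it, rather
                 * than reading a plain integer field.
                 */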
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
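                /*
                 * The TL-indexed trap state above is reached indirectly:
                 * gen_load_trap_state_at_tl() computes a pointer to the
                 * env->ts[] entry for the current trap level at run time,
                 * so the tpc/tnpc/tstate/tt reads always see the active
                 * trap frame.
                 */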
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

                switch (xop) {
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    break;
                case 0x42: /* faddd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    break;
                case 0x43: /* faddq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    break;
                case 0x45: /* fsubs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    break;
                case 0x46: /* fsubd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    break;
                case 0x47: /* fsubq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    break;
                case 0x49: /* fmuls */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    break;
                case 0x4a: /* fmuld */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    break;
                case 0x4b: /* fmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    break;
                case 0x4d: /* fdivs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    break;
                case 0x4e: /* fdivd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    break;
                case 0x4f: /* fdivq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    break;
                case 0x69: /* fsmuld */
                    CHECK_FPU_FEATURE(dc, FSMULD);
                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    break;
                case 0x6e: /* fdmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    break;
                case 0xc4: /* fitos */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                    break;
                case 0xc6: /* fdtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    break;
                case 0xc7: /* fqtos */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    break;
                case 0xc8: /* fitod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    break;
                case 0xc9: /* fstod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    break;
                case 0xcb: /* fqtod */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    break;
                case 0xcc: /* fitoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    break;
                case 0xcd: /* fstoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    break;
                case 0xce: /* fdtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    break;
                case 0xd1: /* fstoi */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                    break;
                case 0xd2: /* fdtoi */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    break;
                case 0xd3: /* fqtoi */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 fmovd */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x3: /* V9 fmovq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_move_Q(rd, rs2);
                    break;
                case 0x6: /* V9 fnegd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                    break;
                case 0x7: /* V9 fnegq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    break;
                case 0xa: /* V9 fabsd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                    break;
                case 0xb: /* V9 fabsq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    break;
                case 0x81: /* V9 fstox */
                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    break;
                case 0x82: /* V9 fdtox */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    break;
                case 0x83: /* V9 fqtox */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    break;
                case 0x84: /* V9 fxtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    break;
                case 0x88: /* V9 fxtod */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    break;
                case 0x8c: /* V9 fxtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
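                /*
                 * Naming convention in the helpers above (inferred from
                 * their use here, not a documented API): the letters after
                 * gen_fop_ give the operand sizes, F = 32-bit single,
                 * D = 64-bit double, Q = 128-bit quad, destination first,
                 * so gen_fop_DFF (fsmuld) multiplies two singles into a
                 * double and gen_fop_QDD (fdmulq) two doubles into a quad.
                 * The gen_ne_ variants appear to be for operations that
                 * never raise IEEE exceptions (moves, negate, abs, ...).
                 */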
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                          \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 10, 12);                 \
        cpu_src1 = get_src1(dc, insn);                     \
        gen_compare_reg(&cmp, cond, cpu_src1);             \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)

                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                    FMOVR(s);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    FMOVR(d);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVR(q);
                    break;
                }
#undef FMOVR
#endif
                switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                    \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        gen_fcompare(&cmp, fcc, cond);                     \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)

                case 0x001: /* V9 fmovscc %fcc0 */
                    FMOVCC(0, s);
                    break;
                case 0x002: /* V9 fmovdcc %fcc0 */
                    FMOVCC(0, d);
                    break;
                case 0x003: /* V9 fmovqcc %fcc0 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x041: /* V9 fmovscc %fcc1 */
                    FMOVCC(1, s);
                    break;
                case 0x042: /* V9 fmovdcc %fcc1 */
                    FMOVCC(1, d);
                    break;
                case 0x043: /* V9 fmovqcc %fcc1 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
                case 0x081: /* V9 fmovscc %fcc2 */
                    FMOVCC(2, s);
                    break;
                case 0x082: /* V9 fmovdcc %fcc2 */
                    FMOVCC(2, d);
                    break;
                case 0x083: /* V9 fmovqcc %fcc2 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(2, q);
                    break;
                case 0x0c1: /* V9 fmovscc %fcc3 */
                    FMOVCC(3, s);
                    break;
                case 0x0c2: /* V9 fmovdcc %fcc3 */
                    FMOVCC(3, d);
                    break;
                case 0x0c3: /* V9 fmovqcc %fcc3 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(3, q);
                    break;
#undef FMOVCC
#define FMOVCC(xcc, sz)                                    \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        gen_compare(&cmp, xcc, cond, dc);                  \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)

                case 0x101: /* V9 fmovscc %icc */
                    FMOVCC(0, s);
                    break;
                case 0x102: /* V9 fmovdcc %icc */
                    FMOVCC(0, d);
                    break;
                case 0x103: /* V9 fmovqcc %icc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x181: /* V9 fmovscc %xcc */
                    FMOVCC(1, s);
                    break;
                case 0x182: /* V9 fmovdcc %xcc */
                    FMOVCC(1, d);
                    break;
                case 0x183: /* V9 fmovqcc %xcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
#undef FMOVCC
#endif
                case 0x51: /* fcmps, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x52: /* fcmpd, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x53: /* fcmpq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpq(rd & 3);
                    break;
                case 0x55: /* fcmpes, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x56: /* fcmped, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x57: /* fcmpeq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpeq(rd & 3);
                    break;
                default:
                    goto illegal_insn;
                }
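                /*
                 * In the fcmp* cases above, rd & 3 selects the condition
                 * code destination: on V9 it picks one of %fcc0..%fcc3,
                 * while 32-bit SPARC only has a single fcc field in the
                 * FSR, so the low bits of rd are simply 0 there.
                 */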
            } else if (xop == 0x2) {
                TCGv dst = gen_dest_gpr(dc, rd);
                rs1 = GET_FIELD(insn, 13, 17);
                if (rs1 == 0) {
                    /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_movi_tl(dst, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            tcg_gen_movi_tl(dst, 0);
                            gen_store_gpr(dc, rd, dst);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            gen_store_gpr(dc, rd, cpu_src2);
                        }
                    }
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_ori_tl(dst, cpu_src1, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            /* mov shortcut:  or x, %g0, y -> mov x, y */
                            gen_store_gpr(dc, rd, cpu_src1);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                            gen_store_gpr(dc, rd, dst);
                        }
                    }
                }
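                /*
                 * Effect of the shortcuts above on common idioms, e.g.:
                 *   or %g0, %g0, %o0  -> tcg movi 0      (clr %o0)
                 *   or %g0, 5, %o0    -> tcg movi 5      (mov 5, %o0)
                 *   or %o1, %g0, %o0  -> plain register copy
                 * avoiding a dead OR against the always-zero %g0.
                 */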
#ifdef TARGET_SPARC64
            } else if (xop == 0x25) { /* sll, V9 sllx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    }
                    tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x26) { /* srl, V9 srlx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x27) { /* sra, V9 srax */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
#endif
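            /*
             * Bit 12 of the instruction distinguishes the V9 64-bit forms
             * above (sllx/srlx/srax, shift count masked to 6 bits) from
             * the 32-bit forms (count masked to 5 bits).  The 32-bit
             * srl/sra paths also pre-truncate or sign-extend the source,
             * so e.g. "srl %o1, 0, %o0" yields the zero-extended low word.
             */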
            } else if (xop < 0x36) {
                if (xop < 0x20) {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop & ~0x10) {
                    case 0x0: /* add */
                        if (xop & 0x10) {
                            gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                            dc->cc_op = CC_OP_ADD;
                        } else {
                            tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x1: /* and */
                        tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x2: /* or */
                        tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x3: /* xor */
                        tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x4: /* sub */
                        if (xop & 0x10) {
                            gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                            dc->cc_op = CC_OP_SUB;
                        } else {
                            tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x5: /* andn */
                        tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x6: /* orn */
                        tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x7: /* xorn */
                        tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x8: /* addx, V9 addc */
                        gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0x9: /* V9 mulx */
                        tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xa: /* umul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xb: /* smul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xc: /* subx, V9 subc */
                        gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0xd: /* V9 udivx */
                        gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xe: /* udiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    case 0xf: /* sdiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    default:
                        goto illegal_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
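                    /*
                     * Lazy condition codes, as used above: instead of
                     * computing PSR.icc after every instruction, the
                     * translator records the kind of operation in
                     * cpu_cc_op / dc->cc_op (CC_OP_ADD, CC_OP_LOGIC, ...)
                     * and keeps the operands or result in cpu_cc_src /
                     * cpu_cc_dst; the flags are only materialized when a
                     * later branch or rdpsr actually needs them.
                     */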
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop) {
                    case 0x20: /* taddcc */
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                        dc->cc_op = CC_OP_TADD;
                        break;
                    case 0x21: /* tsubcc */
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                        dc->cc_op = CC_OP_TSUB;
                        break;
                    case 0x22: /* taddcctv */
                        gen_helper_taddcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TADDTV;
                        break;
                    case 0x23: /* tsubcctv */
                        gen_helper_tsubcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TSUBTV;
                        break;
                    case 0x24: /* mulscc */
                        gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
                        break;
#ifndef TARGET_SPARC64
                    case 0x25: /* sll */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x26: /* srl */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x27: /* sra */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
#endif
                    case 0x30:
                        {
                            cpu_tmp0 = get_temp_tl(dc);
                            switch(rd) {
                            case 0: /* wry */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
                                break;
#ifndef TARGET_SPARC64
                            case 0x01 ... 0x0f: /* undefined in the
                                                   SPARCv8 manual, nop
                                                   on the microSPARC II */
                            case 0x10 ... 0x1f: /* implementation-dependent
                                                   in the SPARCv8 manual,
                                                   nop on the microSPARC II */
                                if ((rd == 0x13) && (dc->def->features &
                                                     CPU_FEATURE_POWERDOWN)) {
                                    /* LEON3 power-down */
                                    gen_helper_power_down(cpu_env);
                                }
                                break;
#else
                            case 0x2: /* V9 wrccr */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_wrccr(cpu_env, cpu_tmp0);
                                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                                dc->cc_op = CC_OP_FLAGS;
                                break;
                            case 0x3: /* V9 wrasi */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                                tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
                                break;
                            case 0x6: /* V9 wrfprs */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
                                break;
                            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                                if (supervisor(dc)) {
                                    ; // XXX
                                }
#endif
                                break;
                            case 0x13: /* Graphics Status */
                                if (gen_trap_ifnofpu(dc)) {
                                    goto jmp_insn;
                                }
                                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
                                break;
                            case 0x14: /* Softint set */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_set_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x15: /* Softint clear */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_clear_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x16: /* Softint write */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_write_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_tick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_stick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;

                            case 0x10: /* Performance Control */
                            case 0x11: /* Performance Instrumentation
                                          Counter */
                            case 0x12: /* Dispatch Control */
#endif
                            default:
                                goto illegal_insn;
                            }
                        }
                        break;
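                    /*
                     * The recurring xor above is the architected behaviour
                     * of wr: the value written is rs1 XOR (rs2 or simm13),
                     * not rs1 itself, so "wr %g0, 7, %y" writes 7 and
                     * "wr %o0, %g0, %y" writes %o0 unchanged.
                     */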
#if !defined(CONFIG_USER_ONLY)
                    case 0x31: /* wrpsr, V9 saved, restored */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0:
                                gen_helper_saved(cpu_env);
                                break;
                            case 1:
                                gen_helper_restored(cpu_env);
                                break;
                            case 2: /* UA2005 allclean */
                            case 3: /* UA2005 otherw */
                            case 4: /* UA2005 normalw */
                            case 5: /* UA2005 invalw */
                                // XXX
                            default:
                                goto illegal_insn;
                            }
#else
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            gen_helper_wrpsr(cpu_env, cpu_tmp0);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                            dc->cc_op = CC_OP_FLAGS;
#endif
                        }
                        break;
                    case 0x32: /* wrwim, V9 wrpr */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0: // tpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 1: // tnpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tnpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 2: // tstate
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state,
                                                           tstate));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 3: // tt
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                                    offsetof(trap_state, tt));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 4: // tick
                                {
                                    TCGv_ptr r_tickptr;

                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 5: // tba
                                tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                                break;
                            case 6: // pstate
                                gen_helper_wrpstate(cpu_env, cpu_tmp0);
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 7: // tl
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, tl));
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 8: // pil
                                gen_helper_wrpil(cpu_env, cpu_tmp0);
                                break;
                            case 9: // cwp
                                gen_helper_wrcwp(cpu_env, cpu_tmp0);
                                break;
                            case 10: // cansave
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cansave));
                                break;
                            case 11: // canrestore
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         canrestore));
                                break;
                            case 12: // cleanwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cleanwin));
                                break;
                            case 13: // otherwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         otherwin));
                                break;
                            case 14: // wstate
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         wstate));
                                break;
                            case 16: // UA2005 gl
                                CHECK_IU_FEATURE(dc, GL);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, gl));
                                break;
                            case 26: // UA2005 strand status
                                CHECK_IU_FEATURE(dc, HYPV);
                                if (!hypervisor(dc))
                                    goto priv_insn;
                                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                                break;
                            default:
                                goto illegal_insn;
                            }
#else
                            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                            if (dc->def->nwindows != 32) {
                                tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                                (1 << dc->def->nwindows) - 1);
                            }
#endif
                        }
                        break;
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                            CHECK_IU_FEATURE(dc, HYPV);
                            if (!hypervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            switch (rd) {
                            case 0: // hpstate
                                // XXX gen_op_wrhpstate();
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                                break;
                            case 5: // htba
                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                                break;
                            case 31: // hstick_cmpr
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, hstick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_hstick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#ifdef TARGET_SPARC64
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            DisasCompare cmp;
                            TCGv dst;

                            if (insn & (1 << 18)) {
                                if (cc == 0) {
                                    gen_compare(&cmp, 0, cond, dc);
                                } else if (cc == 2) {
                                    gen_compare(&cmp, 1, cond, dc);
                                } else {
                                    goto illegal_insn;
                                }
                            } else {
                                gen_fcompare(&cmp, cc, cond);
                            }

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 11-bit field we have
                               in movcc.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 10);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
                    case 0x2d: /* V9 sdivx */
                        gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2e: /* V9 popc */
                        gen_helper_popc(cpu_dst, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            DisasCompare cmp;
                            TCGv dst;

                            gen_compare_reg(&cmp, cond, cpu_src1);

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 10-bit field we have
                               in movr.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 9);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
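                /*
                 * Both movcc and movr above lower to a single TCG movcond:
                 * dst = cond(c1, c2) ? src2 : dst, with the old register
                 * value as the "else" operand, so no branch is generated
                 * for conditional moves.
                 */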
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
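                /*
                 * The gen_edge calls above share one helper for the whole
                 * VIS edge8/16/32 family; the trailing constants are the
                 * element width in bits and two flags which, inferred from
                 * the case labels, select the cc-updating variant and the
                 * little-endian (l) form respectively.
                 */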
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                {
                    TCGv_i32 r_const;

                    cpu_src1 = get_src1(dc, insn);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (IS_IMM) {   /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                    } else {                /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2) {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                        } else {
                            tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                        }
                    }
                    gen_helper_restore(cpu_env);
                    r_const = tcg_const_i32(3);
                    gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(r_const);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    goto jmp_insn;
                }
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
                case 0x38: /* jmpl */
                    {
                        TCGv t;
                        TCGv_i32 r_const;

                        t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);

                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39: /* rett, V9 return */
                    {
                        TCGv_i32 r_const;

                        if (!supervisor(dc))
                            goto priv_insn;
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                        goto unimp_flush;
                    /* nop */
                    break;
                case 0x3c: /* save */
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d: /* restore */
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e: /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
    case 3: /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
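                /*
                 * All load/store forms below therefore see one canonical
                 * address temp: cpu_addr = rs1 + (simm13 or rs2), except
                 * for casa/casxa, which take the address from rs1 alone.
                 * E.g. "ld [%o0 + 4], %o1" adds the constant 4 here, while
                 * "ld [%o0 + %o2], %o1" adds the register.
                 */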
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0: /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1: /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2: /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3: /* ldd, load double word */
                    {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9: /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa: /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd: /* ldstub -- XXX: should be atomically */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10: /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11: /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12: /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13: /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19: /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a: /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d: /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f: /* swapa, swap reg with alt. memory. Also
                              atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20: /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22: /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23: /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
4913 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4914 xop
== 0xe || xop
== 0x1e) {
4915 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
4918 case 0x4: /* st, store word */
4919 gen_address_mask(dc
, cpu_addr
);
4920 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4922 case 0x5: /* stb, store byte */
4923 gen_address_mask(dc
, cpu_addr
);
4924 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4926 case 0x6: /* sth, store halfword */
4927 gen_address_mask(dc
, cpu_addr
);
4928 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4930 case 0x7: /* std, store double word */
4939 gen_address_mask(dc
, cpu_addr
);
4940 r_const
= tcg_const_i32(7);
4941 /* XXX remove alignment check */
4942 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4943 tcg_temp_free_i32(r_const
);
4944 lo
= gen_load_gpr(dc
, rd
+ 1);
4946 t64
= tcg_temp_new_i64();
4947 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
4948 tcg_gen_qemu_st64(t64
, cpu_addr
, dc
->mem_idx
);
4949 tcg_temp_free_i64(t64
);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 4);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 1);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 2);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                if (rd & 1)
                    goto illegal_insn;
                else {
                    save_state(dc);
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                }
                break;
#endif
#ifdef TARGET_SPARC64
            case 0x0e: /* V9 stx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x1e: /* V9 stxa */
                save_state(dc);
                gen_st_asi(cpu_val, cpu_addr, insn, 8);
                dc->npc = DYNAMIC_PC;
                break;
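                /* A store through an ASI may touch state with side effects
                   (MMU control registers, for instance) that can invalidate
                   the current translation; forcing npc to DYNAMIC_PC ends
                   the TB so state is re-evaluated after the store. */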
#endif
            default:
                goto illegal_insn;
            }
        } else if (xop > 0x23 && xop < 0x28) {
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            save_state(dc);
            switch (xop) {
            case 0x24: /* stf, store fpreg */
                {
                    TCGv t = get_temp_tl(dc);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_ext_i32_tl(t, cpu_src1_32);
                    tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                }
                break;
            case 0x25: /* stfsr, V9 stxfsr */
                {
                    TCGv t = get_temp_tl(dc);

                    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                        break;
                    }
#endif
                    tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                }
                break;
            case 0x26:
#ifdef TARGET_SPARC64
                /* V9 stqf, store quad fpreg */
                {
                    TCGv_i32 r_const;

                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rd));
                    r_const = tcg_const_i32(dc->mem_idx);
                    gen_address_mask(dc, cpu_addr);
                    gen_helper_stqf(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                }
                break;
#else /* !TARGET_SPARC64 */
                /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                goto illegal_insn;
#else
                if (!supervisor(dc))
                    goto priv_insn;
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                goto nfq_insn;
#endif
#endif
            case 0x27: /* stdf, store double fpreg */
                gen_address_mask(dc, cpu_addr);
                cpu_src1_64 = gen_load_fpr_D(dc, rd);
                tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                break;
            default:
                goto illegal_insn;
            }
        } else if (xop > 0x33 && xop < 0x3f) {
            save_state(dc);
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x34: /* V9 stfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_stf_asi(cpu_addr, insn, 4, rd);
                break;
            case 0x36: /* V9 stqfa */
                {
                    TCGv_i32 r_const;

                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    r_const = tcg_const_i32(7);
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                }
                break;
            case 0x37: /* V9 stdfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                break;
            case 0x3e: /* V9 casxa */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                break;
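                /* GET_FIELD numbers bits from the MSB (bit 0 is 2^31), so
                   GET_FIELD(insn, 27, 31) extracts the low five bits of the
                   instruction word, i.e. the rs2 register number. */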
#else
            case 0x34: /* stc */
            case 0x35: /* stcsr */
            case 0x36: /* stdcq */
            case 0x37: /* stdc */
                goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                CHECK_IU_FEATURE(dc, CASA);
                if (IS_IMM) {
                    goto illegal_insn;
                }
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
#endif
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                break;
#endif
            default:
                goto illegal_insn;
            }
        } else {
            goto illegal_insn;
        }
    }
    break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
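    /* The labels below are reached only by goto: jmp_insn for instructions
       that updated pc/npc themselves, the *_insn labels for the various trap
       conditions, and egress as the common temporary-cleanup exit. */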
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
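/* Temporaries allocated through get_temp_i32()/get_temp_tl() are recorded in
   dc->t32[]/dc->ttl[] and freed in bulk at egress above, so the individual
   opcode handlers never free them explicitly. */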
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;
    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(cpu_env);
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
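        /* When a breakpoint hits, translated state is flushed back with
           save_state() before raising the debug exception so the reported
           pc/npc are consistent; at pc_start this is skipped, presumably
           because nothing has modified the state yet. */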
        if (spc) {
            qemu_log("Search PC...\n");
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                tcg_ctx.gen_opc_pc[lj] = dc->pc;
                gen_opc_npc[lj] = dc->npc;
                tcg_ctx.gen_opc_instr_start[lj] = 1;
                tcg_ctx.gen_opc_icount[lj] = num_insns;
            }
        }
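        /* In search-pc mode every guest instruction records its pc/npc at
           the current opcode-buffer index; restore_state_to_opc() uses these
           tables to map a faulting host PC back to guest PC state. */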
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);
        num_insns++;

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((tcg_ctx.gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
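    /* The (TARGET_PAGE_SIZE - 32) bound keeps the whole TB within one guest
       page, with a little slack, complementing the page-boundary check
       inside the loop. */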
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
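    /* If both pc and npc are static the TB is chained directly to its
       successor via gen_goto_tb(); otherwise whatever is known is written
       to cpu_pc/cpu_npc and control returns to the main loop through
       tcg_gen_exit_tb(0). */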
    if (spc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        gen_opc_jump_pc[0] = dc->jump_pc[0];
        gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}
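/* The two entry points differ only in the spc flag passed to the internal
   worker: the _pc variant performs the search-pc translation needed when an
   exception must map a host PC back to a guest pc/npc pair. */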
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cond
),
5443 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_src
),
5445 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
,
5446 offsetof(CPUSPARCState
, cc_src2
),
5448 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_dst
),
5450 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, cc_op
),
5452 cpu_psr
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, psr
),
5454 cpu_fsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, fsr
),
5456 cpu_pc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, pc
),
5458 cpu_npc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, npc
),
5460 cpu_y
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, y
), "y");
5461 #ifndef CONFIG_USER_ONLY
5462 cpu_tbr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, tbr
),
5465 for (i
= 1; i
< 8; i
++) {
5466 cpu_gregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
5467 offsetof(CPUSPARCState
, gregs
[i
]),
5470 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5471 cpu_fpr
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
5472 offsetof(CPUSPARCState
, fpr
[i
]),
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong npc;

    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    npc = gen_opc_npc[pc_pos];
    if (npc == 1) {
        /* dynamic NPC: already stored */
    } else if (npc == 2) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = gen_opc_jump_pc[0];
        } else {
            env->npc = gen_opc_jump_pc[1];
        }
    } else {
        env->npc = npc;
    }
}
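/* gen_opc_npc[] reuses the DYNAMIC_PC (1) and JUMP_PC (2) encodings: 1 means
   cpu_npc was already written at translation time, 2 means the next PC
   depends on the pending condition in env->cond, and any other value is the
   literal next PC. */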