/*
   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "disas/disas.h"

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
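/* Worked example: GET_FIELD numbers bit 0 as the MSB, so for a 32-bit
   instruction word GET_FIELD(insn, 0, 1) yields the 2-bit op field
   (insn >> 30) and GET_FIELD(insn, 2, 6) the 5-bit rd field
   ((insn >> 25) & 0x1f).  GET_FIELD_SP uses the manuals' LSB-first
   numbering instead, e.g. GET_FIELD_SP(insn, 25, 28) == GET_FIELD(insn, 3, 6). */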
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
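/* On SPARC64 the low bit of the 5-bit register encoding selects the upper
   bank of FP registers, e.g. DFPREG(1) == ((1 & 1) << 5) | (1 & 0x1e) == 32,
   so encoding 1 names %f32.  On 32-bit SPARC the odd bit is simply masked off. */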
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
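/* Worked example: sign_extend(0x40000, 19) shifts the 19-bit value up by
   13 (0x80000000) and arithmetic-shifts it back, giving 0xfffc0000 -- the
   all-ones extension of a negative 19-bit branch displacement. */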
#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
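/* The two bits set here correspond to the V9 FPRS.DL/FPRS.DU dirty flags:
   a destination below %f32 marks the lower half of the FP register file
   dirty, anything above marks the upper half. */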
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
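/* Returning (uintptr_t)tb + tb_num from exit_tb hands the execution loop
   the TB pointer with the jump-slot index in its low bits, so the goto_tb
   branch emitted above can later be patched to chain the two blocks
   directly; exit_tb(0) in the cross-page path opts out of that chaining. */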
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
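/* Recovering a flag that was never stored: for an add, a carry came out
   iff the truncated 32-bit result is unsigned-less-than an operand
   (e.g. 0xffffffff + 2 -> 1, and 1 < 2); for a sub, a borrow occurred iff
   src1 < src2.  That is all the TCG_COND_LTU setcond above computes. */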
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
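/* ADDX needs dst = src1 + src2 + icc.C; each arm of the switch above just
   picks the cheapest way to rematerialize that carry for however the flags
   were last produced (dc->cc_op).  The 32-bit ADD2 fast path recomputes
   cc_src + cc_src2 in the discarded low half purely so the host's hardware
   carry feeds the high-half add. */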
static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
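/* MULScc is one step of the SPARCv8 software multiply loop: the low bit of
   %y decides whether the other operand is added in this round, %y and the
   partial product each shift right by one, and N ^ V from the previous
   step is shifted in at the top; a full 32x32 multiply is 32 such steps. */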
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
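/* E.g. an unsigned multiply of 0x80000000 by 0x80000000 yields the 64-bit
   product 0x4000000000000000; the high word 0x40000000 is copied into %y,
   which is how V8 UMUL/SMUL expose the upper half of the result. */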
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
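/* pc <- npc, npc <- npc + 4 is the architectural delay-slot sequencing:
   every SPARC control transfer takes effect after the following
   instruction, which is why the translator tracks a (pc, npc) pair
   instead of a single program counter. */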
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
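/* This is the lazy condition-code scheme in action: dc->cc_op records which
   kind of instruction last defined the flags.  When it was a subcc, a
   conditional branch folds into a single TCG compare of the saved operands;
   only the leftover cases pay for a full PSR computation via the helper. */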
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif

static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
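/* All of these alternate-space accesses funnel through the ld_asi/st_asi
   helpers because in the register form of the instructions the ASI is only
   known at runtime: the helper dispatches on (asi, size, sign) instead of
   the translator emitting a direct memory operation. */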
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
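/* IS_IMM tests instruction bit 13: when set, GET_FIELDs(insn, 19, 31)
   extracts the 13-bit simm13 field and sign-extends it, so an encoded
   0x1fff comes back as -1. */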
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}

static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}

static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;

    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;

    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;

    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
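/* Worked example (width 8, big-endian, left): for s1 with low bits 2 the
   index is 2 << 3 = 16, and (0x80c0e0f0f8fcfeff >> 16) & 0xff = 0xfc,
   i.e. the edge mask with the first two byte lanes cleared. */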
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}

static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
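/* The xori-with-63 trick: for a byte offset k the right shift needed is
   64 - 8k, but a shift by 64 is undefined in TCG.  Since 8k only uses bits
   3..5, (8k ^ 63) == 63 - 8k, so shifting by that and then by one more
   gives exactly 64 - 8k while staying in the defined 0..63 range (and for
   k == 0 the double shift correctly zeroes the s2 contribution). */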
2488 #define CHECK_IU_FEATURE(dc, FEATURE) \
2489 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2491 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2492 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2495 /* before an instruction, dc->pc must be static */
2496 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2498 unsigned int opc
, rs1
, rs2
, rd
;
2499 TCGv cpu_src1
, cpu_src2
;
2500 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2501 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2504 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2505 tcg_gen_debug_insn_start(dc
->pc
);
2508 opc
= GET_FIELD(insn
, 0, 1);
2509 rd
= GET_FIELD(insn
, 2, 6);
2512 case 0: /* branches/sethi */
2514 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2517 #ifdef TARGET_SPARC64
2518 case 0x1: /* V9 BPcc */
2522 target
= GET_FIELD_SP(insn
, 0, 18);
2523 target
= sign_extend(target
, 19);
2525 cc
= GET_FIELD_SP(insn
, 20, 21);
2527 do_branch(dc
, target
, insn
, 0);
2529 do_branch(dc
, target
, insn
, 1);
2534 case 0x3: /* V9 BPr */
2536 target
= GET_FIELD_SP(insn
, 0, 13) |
2537 (GET_FIELD_SP(insn
, 20, 21) << 14);
2538 target
= sign_extend(target
, 16);
2540 cpu_src1
= get_src1(dc
, insn
);
2541 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2544 case 0x5: /* V9 FBPcc */
2546 int cc
= GET_FIELD_SP(insn
, 20, 21);
2547 if (gen_trap_ifnofpu(dc
)) {
2550 target
= GET_FIELD_SP(insn
, 0, 18);
2551 target
= sign_extend(target
, 19);
2553 do_fbranch(dc
, target
, insn
, cc
);
2557 case 0x7: /* CBN+x */
2562 case 0x2: /* BN+x */
2564 target
= GET_FIELD(insn
, 10, 31);
2565 target
= sign_extend(target
, 22);
2567 do_branch(dc
, target
, insn
, 0);
2570 case 0x6: /* FBN+x */
2572 if (gen_trap_ifnofpu(dc
)) {
2575 target
= GET_FIELD(insn
, 10, 31);
2576 target
= sign_extend(target
, 22);
2578 do_fbranch(dc
, target
, insn
, 0);
2581 case 0x4: /* SETHI */
2582 /* Special-case %g0 because that's the canonical nop. */
2584 uint32_t value
= GET_FIELD(insn
, 10, 31);
2585 TCGv t
= gen_dest_gpr(dc
, rd
);
2586 tcg_gen_movi_tl(t
, value
<< 10);
2587 gen_store_gpr(dc
, rd
, t
);
2590 case 0x0: /* UNIMPL */
2599 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2600 TCGv o7
= gen_dest_gpr(dc
, 15);
2602 tcg_gen_movi_tl(o7
, dc
->pc
);
2603 gen_store_gpr(dc
, 15, o7
);
2606 #ifdef TARGET_SPARC64
2607 if (unlikely(AM_CHECK(dc
))) {
2608 target
&= 0xffffffffULL
;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;

            if (xop == 0x3a) {  /* generate trap */
                int cond = GET_FIELD(insn, 3, 6);
                TCGv_i32 trap;
                int l1 = -1, mask;

                if (cond == 0) {
                    /* Trap never.  */
                    break;
                }

                save_state(dc);

                if (cond != 8) {
                    /* Conditional trap.  */
                    DisasCompare cmp;
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    if (cc == 0) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    } else {
                        goto illegal_insn;
                    }
#else
                    gen_compare(&cmp, 0, cond, dc);
#endif
                    l1 = gen_new_label();
                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                                      cmp.c1, cmp.c2, l1);
                    free_compare(&cmp);
                }

                mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                        ? UA2005_HTRAP_MASK : V8_TRAP_MASK);

                /* Don't use the normal temporaries, as they may well have
                   gone out of scope with the branch above.  While we're
                   doing that we might as well pre-truncate to 32-bit.  */
                trap = tcg_temp_new_i32();

                rs1 = GET_FIELD_SP(insn, 14, 18);
                if (IS_IMM) {
                    rs2 = GET_FIELD_SP(insn, 0, 6);
                    if (rs1 == 0) {
                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                        /* Signal that the trap value is fully constant.  */
                        mask = 0;
                    } else {
                        TCGv t1 = gen_load_gpr(dc, rs1);
                        tcg_gen_trunc_tl_i32(trap, t1);
                        tcg_gen_addi_i32(trap, trap, rs2);
                    }
                } else {
                    TCGv t1, t2;
                    rs2 = GET_FIELD_SP(insn, 0, 4);
                    t1 = gen_load_gpr(dc, rs1);
                    t2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(t1, t1, t2);
                    tcg_gen_trunc_tl_i32(trap, t1);
                }
                if (mask != 0) {
                    tcg_gen_andi_i32(trap, trap, mask);
                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);

                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->is_br = 1;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
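            /* Read state registers: rd %y and the other readable ASRs
               (xop 0x28), followed by the privileged rdpsr/rdwim/rdtbr
               group.  Each case copies architectural state into a
               temporary and stores it to rd. */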
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                save_state(dc);
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
                break;
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
                save_state(dc);
                switch (xop) {
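                /* The gen_[ne_]fop_* helper names encode operand widths:
                   F = 32-bit single, D = 64-bit double, Q = 128-bit quad.
                   The "ne" (no-exception) variants skip the IEEE exception
                   check that the plain gen_fop_* wrappers emit. */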
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    break;
                case 0x42: /* faddd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    break;
                case 0x43: /* faddq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    break;
                case 0x45: /* fsubs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    break;
                case 0x46: /* fsubd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    break;
                case 0x47: /* fsubq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    break;
                case 0x49: /* fmuls */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    break;
                case 0x4a: /* fmuld */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    break;
                case 0x4b: /* fmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    break;
                case 0x4d: /* fdivs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    break;
                case 0x4e: /* fdivd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    break;
                case 0x4f: /* fdivq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    break;
                case 0x69: /* fsmuld */
                    CHECK_FPU_FEATURE(dc, FSMULD);
                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    break;
                case 0x6e: /* fdmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    break;
                case 0xc4: /* fitos */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                    break;
                case 0xc6: /* fdtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    break;
                case 0xc7: /* fqtos */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    break;
                case 0xc8: /* fitod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    break;
                case 0xc9: /* fstod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    break;
                case 0xcb: /* fqtod */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    break;
                case 0xcc: /* fitoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    break;
                case 0xcd: /* fstoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    break;
                case 0xce: /* fdtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    break;
                case 0xd1: /* fstoi */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                    break;
                case 0xd2: /* fdtoi */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    break;
                case 0xd3: /* fqtoi */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 fmovd */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x3: /* V9 fmovq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_move_Q(rd, rs2);
                    break;
                case 0x6: /* V9 fnegd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                    break;
                case 0x7: /* V9 fnegq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    break;
                case 0xa: /* V9 fabsd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                    break;
                case 0xb: /* V9 fabsq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    break;
                case 0x81: /* V9 fstox */
                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    break;
                case 0x82: /* V9 fdtox */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    break;
                case 0x83: /* V9 fqtox */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    break;
                case 0x84: /* V9 fxtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    break;
                case 0x88: /* V9 fxtod */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    break;
                case 0x8c: /* V9 fxtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
                save_state(dc);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                                  \
                do {                                               \
                    DisasCompare cmp;                              \
                    cond = GET_FIELD_SP(insn, 10, 12);             \
                    cpu_src1 = get_src1(dc, insn);                 \
                    gen_compare_reg(&cmp, cond, cpu_src1);         \
                    gen_fmov##sz(dc, &cmp, rd, rs2);               \
                    free_compare(&cmp);                            \
                } while (0)

                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                    FMOVR(s);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    FMOVR(d);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVR(q);
                    break;
                }
#undef FMOVR
#endif
                switch (xop) {
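                /* The fmovcc family below conditionally moves an FP
                   register based on either a %fccN comparison
                   (gen_fcompare) or an integer %icc/%xcc comparison
                   (gen_compare); the two FMOVCC macro definitions differ
                   only in which comparison they build. */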
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                                 \
                do {                                                    \
                    DisasCompare cmp;                                   \
                    cond = GET_FIELD_SP(insn, 14, 17);                  \
                    gen_fcompare(&cmp, fcc, cond);                      \
                    gen_fmov##sz(dc, &cmp, rd, rs2);                    \
                    free_compare(&cmp);                                 \
                } while (0)

                case 0x001: /* V9 fmovscc %fcc0 */
                    FMOVCC(0, s);
                    break;
                case 0x002: /* V9 fmovdcc %fcc0 */
                    FMOVCC(0, d);
                    break;
                case 0x003: /* V9 fmovqcc %fcc0 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x041: /* V9 fmovscc %fcc1 */
                    FMOVCC(1, s);
                    break;
                case 0x042: /* V9 fmovdcc %fcc1 */
                    FMOVCC(1, d);
                    break;
                case 0x043: /* V9 fmovqcc %fcc1 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
                case 0x081: /* V9 fmovscc %fcc2 */
                    FMOVCC(2, s);
                    break;
                case 0x082: /* V9 fmovdcc %fcc2 */
                    FMOVCC(2, d);
                    break;
                case 0x083: /* V9 fmovqcc %fcc2 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(2, q);
                    break;
                case 0x0c1: /* V9 fmovscc %fcc3 */
                    FMOVCC(3, s);
                    break;
                case 0x0c2: /* V9 fmovdcc %fcc3 */
                    FMOVCC(3, d);
                    break;
                case 0x0c3: /* V9 fmovqcc %fcc3 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(3, q);
                    break;
#undef FMOVCC
#define FMOVCC(xcc, sz)                                                 \
                do {                                                    \
                    DisasCompare cmp;                                   \
                    cond = GET_FIELD_SP(insn, 14, 17);                  \
                    gen_compare(&cmp, xcc, cond, dc);                   \
                    gen_fmov##sz(dc, &cmp, rd, rs2);                    \
                    free_compare(&cmp);                                 \
                } while (0)

                case 0x101: /* V9 fmovscc %icc */
                    FMOVCC(0, s);
                    break;
                case 0x102: /* V9 fmovdcc %icc */
                    FMOVCC(0, d);
                    break;
                case 0x103: /* V9 fmovqcc %icc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x181: /* V9 fmovscc %xcc */
                    FMOVCC(1, s);
                    break;
                case 0x182: /* V9 fmovdcc %xcc */
                    FMOVCC(1, d);
                    break;
                case 0x183: /* V9 fmovqcc %xcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
#undef FMOVCC
#endif
                case 0x51: /* fcmps, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x52: /* fcmpd, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x53: /* fcmpq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpq(rd & 3);
                    break;
                case 0x55: /* fcmpes, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x56: /* fcmped, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x57: /* fcmpeq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpeq(rd & 3);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x2) {
                TCGv dst = gen_dest_gpr(dc, rd);
                rs1 = GET_FIELD(insn, 13, 17);
                if (rs1 == 0) {
                    /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_movi_tl(dst, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            tcg_gen_movi_tl(dst, 0);
                            gen_store_gpr(dc, rd, dst);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            gen_store_gpr(dc, rd, cpu_src2);
                        }
                    }
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_ori_tl(dst, cpu_src1, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            /* mov shortcut:  or x, %g0, y -> mov x, y */
                            gen_store_gpr(dc, rd, cpu_src1);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                            gen_store_gpr(dc, rd, dst);
                        }
                    }
                }
#ifdef TARGET_SPARC64
            } else if (xop == 0x25) { /* sll, V9 sllx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
                    }
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    }
                    tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x26) { /* srl, V9 srlx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x27) { /* sra, V9 srax */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
#endif
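            /* Arithmetic/logical group.  Condition codes are evaluated
               lazily: cpu_cc_op (mirrored in dc->cc_op) records which
               operation last set the flags, so the PSR can be recomputed
               only when it is actually read. */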
            } else if (xop < 0x36) {
                if (xop < 0x20) {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop & ~0x10) {
                    case 0x0: /* add */
                        if (xop & 0x10) {
                            gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                            dc->cc_op = CC_OP_ADD;
                        } else {
                            tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x1: /* and */
                        tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x2: /* or */
                        tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x3: /* xor */
                        tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x4: /* sub */
                        if (xop & 0x10) {
                            gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                            dc->cc_op = CC_OP_SUB;
                        } else {
                            tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x5: /* andn */
                        tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x6: /* orn */
                        tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x7: /* xorn */
                        tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x8: /* addx, V9 addc */
                        gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0x9: /* V9 mulx */
                        tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xa: /* umul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xb: /* smul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xc: /* subx, V9 subc */
                        gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0xd: /* V9 udivx */
                        gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xe: /* udiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    case 0xf: /* sdiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    default:
                        goto illegal_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop) {
                    case 0x20: /* taddcc */
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                        dc->cc_op = CC_OP_TADD;
                        break;
                    case 0x21: /* tsubcc */
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                        dc->cc_op = CC_OP_TSUB;
                        break;
                    case 0x22: /* taddcctv */
                        gen_helper_taddcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TADDTV;
                        break;
                    case 0x23: /* tsubcctv */
                        gen_helper_tsubcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TSUBTV;
                        break;
                    case 0x24: /* mulscc */
                        update_psr(dc);
                        gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
                        break;
#ifndef TARGET_SPARC64
                    case 0x25:  /* sll */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x26:  /* srl */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x27:  /* sra */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
#endif
                    case 0x30:
                        {
                            cpu_tmp0 = get_temp_tl(dc);
                            switch(rd) { /* wr %asr */
                            case 0: /* wry */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
                                break;
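                            /* wr %asr is architecturally defined as writing
                               rs1 XOR (rs2 or simm13), which is why each
                               writable case starts with a tcg_gen_xor_tl of
                               the two sources. */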
#ifndef TARGET_SPARC64
                            case 0x01 ... 0x0f: /* undefined in the
                                                   SPARCv8 manual, nop
                                                   on the microSPARC
                                                   II */
                            case 0x10 ... 0x1f: /* implementation-dependent
                                                   in the SPARCv8
                                                   manual, nop on the
                                                   microSPARC II */
                                if ((rd == 0x13) && (dc->def->features &
                                                     CPU_FEATURE_POWERDOWN)) {
                                    /* LEON3 power-down */
                                    save_state(dc);
                                    gen_helper_power_down(cpu_env);
                                }
                                break;
#else
                            case 0x2: /* V9 wrccr */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_wrccr(cpu_env, cpu_tmp0);
                                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                                dc->cc_op = CC_OP_FLAGS;
                                break;
                            case 0x3: /* V9 wrasi */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                                tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
                                break;
                            case 0x6: /* V9 wrfprs */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(0);
                                dc->is_br = 1;
                                break;
                            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                                if (supervisor(dc)) {
                                    ; // XXX
                                }
#endif
                                break;
                            case 0x13: /* Graphics Status */
                                if (gen_trap_ifnofpu(dc)) {
                                    goto jmp_insn;
                                }
                                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
                                break;
                            case 0x14: /* Softint set */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_set_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x15: /* Softint clear */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_clear_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x16: /* Softint write */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_write_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_tick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_stick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;

                            case 0x10: /* Performance Control */
                            case 0x11: /* Performance Instrumentation
                                          Counter */
                            case 0x12: /* Dispatch Control */
#endif
                            default:
                                goto illegal_insn;
                            }
                        }
                        break;
#if !defined(CONFIG_USER_ONLY)
                    case 0x31: /* wrpsr, V9 saved, restored */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0:
                                gen_helper_saved(cpu_env);
                                break;
                            case 1:
                                gen_helper_restored(cpu_env);
                                break;
                            case 2: /* UA2005 allclean */
                            case 3: /* UA2005 otherw */
                            case 4: /* UA2005 normalw */
                            case 5: /* UA2005 invalw */
                                // XXX
                            default:
                                goto illegal_insn;
                            }
#else
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            gen_helper_wrpsr(cpu_env, cpu_tmp0);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                            dc->cc_op = CC_OP_FLAGS;
                            save_state(dc);
                            gen_op_next_insn();
                            tcg_gen_exit_tb(0);
                            dc->is_br = 1;
#endif
                        }
                        break;
                    case 0x32: /* wrwim, V9 wrpr */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0: // tpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 1: // tnpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tnpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 2: // tstate
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state,
                                                           tstate));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 3: // tt
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                                    offsetof(trap_state, tt));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 4: // tick
                                {
                                    TCGv_ptr r_tickptr;

                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 5: // tba
                                tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                                break;
                            case 6: // pstate
                                save_state(dc);
                                gen_helper_wrpstate(cpu_env, cpu_tmp0);
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 7: // tl
                                save_state(dc);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, tl));
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 8: // pil
                                gen_helper_wrpil(cpu_env, cpu_tmp0);
                                break;
                            case 9: // cwp
                                gen_helper_wrcwp(cpu_env, cpu_tmp0);
                                break;
                            case 10: // cansave
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cansave));
                                break;
                            case 11: // canrestore
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         canrestore));
                                break;
                            case 12: // cleanwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cleanwin));
                                break;
                            case 13: // otherwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         otherwin));
                                break;
                            case 14: // wstate
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         wstate));
                                break;
                            case 16: // UA2005 gl
                                CHECK_IU_FEATURE(dc, GL);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, gl));
                                break;
                            case 26: // UA2005 strand status
                                CHECK_IU_FEATURE(dc, HYPV);
                                if (!hypervisor(dc))
                                    goto priv_insn;
                                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                                break;
                            default:
                                goto illegal_insn;
                            }
#else
                            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                            if (dc->def->nwindows != 32) {
                                tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                                (1 << dc->def->nwindows) - 1);
                            }
#endif
                        }
                        break;
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                            CHECK_IU_FEATURE(dc, HYPV);
                            if (!hypervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            switch (rd) {
                            case 0: // hpstate
                                // XXX gen_op_wrhpstate();
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(0);
                                dc->is_br = 1;
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                                break;
                            case 5: // htba
                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                                break;
                            case 31: // hstick_cmpr
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, hstick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_hstick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#ifdef TARGET_SPARC64
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            DisasCompare cmp;
                            TCGv dst;

                            if (insn & (1 << 18)) {
                                if (cc == 0) {
                                    gen_compare(&cmp, 0, cond, dc);
                                } else if (cc == 2) {
                                    gen_compare(&cmp, 1, cond, dc);
                                } else {
                                    goto illegal_insn;
                                }
                            } else {
                                gen_fcompare(&cmp, cc, cond);
                            }

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 11-bit field we have
                               in movcc.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 10);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
                    case 0x2d: /* V9 sdivx */
                        gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2e: /* V9 popc */
                        gen_helper_popc(cpu_dst, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            DisasCompare cmp;
                            TCGv dst;

                            gen_compare_reg(&cmp, cond, cpu_src1);

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 10-bit field we have
                               in movr.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 9);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
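                /* VIS ops dispatch on the 9-bit opf field.  The edge
                   instructions below compute partial-store masks for the
                   start/end of a block copy; gen_edge's trailing arguments
                   select the element width (8/16/32) and the cc-setting
                   and "l" (little-endian) variants. */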
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
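            /* V9 return is essentially "restore" (pop the register window)
               combined with an indirect jump through the computed target,
               with the target's alignment checked first. */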
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                TCGv_i32 r_const;

                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_helper_restore(cpu_env);
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
                case 0x38:      /* jmpl */
                    {
                        TCGv t;
                        TCGv_i32 r_const;

                        t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39:      /* rett, V9 return */
                    {
                        TCGv_i32 r_const;

                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                        goto unimp_flush;
                    /* nop */
                    break;
                case 0x3c:      /* save */
                    save_state(dc);
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d:      /* restore */
                    save_state(dc);
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e:      /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
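                /* Integer loads: each case below leaves its result in
                   cpu_val, and the common gen_store_gpr after the switch
                   writes it back to rd (the ASI forms that store the
                   result themselves bypass this via goto skip_move). */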
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:       /* ldstub -- XXX: should be atomically */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:      /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
                case 0x19:      /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory. Also
                                   atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;

#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
4909 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4910 xop
== 0xe || xop
== 0x1e) {
4911 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
4914 case 0x4: /* st, store word */
4915 gen_address_mask(dc
, cpu_addr
);
4916 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4918 case 0x5: /* stb, store byte */
4919 gen_address_mask(dc
, cpu_addr
);
4920 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4922 case 0x6: /* sth, store halfword */
4923 gen_address_mask(dc
, cpu_addr
);
4924 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4926 case 0x7: /* std, store double word */
4935 gen_address_mask(dc
, cpu_addr
);
4936 r_const
= tcg_const_i32(7);
4937 /* XXX remove alignment check */
4938 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4939 tcg_temp_free_i32(r_const
);
4940 lo
= gen_load_gpr(dc
, rd
+ 1);
4942 t64
= tcg_temp_new_i64();
4943 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
4944 tcg_gen_qemu_st64(t64
, cpu_addr
, dc
->mem_idx
);
4945 tcg_temp_free_i64(t64
);
4948 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4949 case 0x14: /* sta, V9 stwa, store word alternate */
4950 #ifndef TARGET_SPARC64
4953 if (!supervisor(dc
))
4957 gen_st_asi(cpu_val
, cpu_addr
, insn
, 4);
4958 dc
->npc
= DYNAMIC_PC
;
4960 case 0x15: /* stba, store byte alternate */
4961 #ifndef TARGET_SPARC64
4964 if (!supervisor(dc
))
4968 gen_st_asi(cpu_val
, cpu_addr
, insn
, 1);
4969 dc
->npc
= DYNAMIC_PC
;
4971 case 0x16: /* stha, store halfword alternate */
4972 #ifndef TARGET_SPARC64
4975 if (!supervisor(dc
))
4979 gen_st_asi(cpu_val
, cpu_addr
, insn
, 2);
4980 dc
->npc
= DYNAMIC_PC
;
4982 case 0x17: /* stda, store double word alternate */
4983 #ifndef TARGET_SPARC64
4986 if (!supervisor(dc
))
4993 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
#ifdef TARGET_SPARC64
            case 0x0e: /* V9 stx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x1e: /* V9 stxa */
                gen_st_asi(cpu_val, cpu_addr, insn, 8);
                dc->npc = DYNAMIC_PC;
                break;
#endif
            default:
                goto illegal_insn;
            }
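        /* xop 0x24-0x27: floating-point stores stf, stfsr/stxfsr,
           stqf (V9) or stdfq (pre-V9), and stdf. */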
        } else if (xop > 0x23 && xop < 0x28) {
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            switch (xop) {
            case 0x24: /* stf, store fpreg */
                {
                    TCGv t = get_temp_tl(dc);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_ext_i32_tl(t, cpu_src1_32);
                    tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                }
                break;
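            /* case 0x25: stfsr stores the 32-bit FSR; on V9, rd == 1
               selects stxfsr and all 64 FSR bits are stored. */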
            case 0x25: /* stfsr, V9 stxfsr */
                {
                    TCGv t = get_temp_tl(dc);

                    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                        break;
                    }
#endif
                    tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                }
                break;
            case 0x26:
#ifdef TARGET_SPARC64
                /* V9 stqf, store quad fpreg */
                {
                    TCGv_i32 r_const;

                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rd));
                    r_const = tcg_const_i32(dc->mem_idx);
                    gen_address_mask(dc, cpu_addr);
                    gen_helper_stqf(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                }
                break;
#else /* !TARGET_SPARC64 */
                /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                goto illegal_insn;
#else
                if (!supervisor(dc))
                    goto priv_insn;
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                goto nfq_insn;
#endif
#endif
            case 0x27: /* stdf, store double fpreg */
                gen_address_mask(dc, cpu_addr);
                cpu_src1_64 = gen_load_fpr_D(dc, rd);
                tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                break;
            default:
                goto illegal_insn;
            }
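        /* xop 0x34-0x3e: V9 alternate-space FP stores plus the casa and
           casxa compare-and-swap forms; on 32-bit SPARC these encodings
           belong to the (unimplemented) coprocessor and raise ncp_insn. */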
        } else if (xop > 0x33 && xop < 0x3f) {
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x34: /* V9 stfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_stf_asi(cpu_addr, insn, 4, rd);
                break;
            case 0x36: /* V9 stqfa */
                {
                    TCGv_i32 r_const;

                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    r_const = tcg_const_i32(7);
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                }
                break;
            case 0x37: /* V9 stdfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                break;
            case 0x3c: /* V9 casa */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                break;
            case 0x3e: /* V9 casxa */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                break;
#else
            case 0x34: /* stc */
            case 0x35: /* stcsr */
            case 0x36: /* stdcq */
            case 0x37: /* stdc */
                goto ncp_insn;
#endif
            default:
                goto illegal_insn;
            }
        } else {
            goto illegal_insn;
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
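    /* Error exits: each label below emits the corresponding trap and
       marks the TB finished (dc->is_br = 1) so translation stops at
       this instruction. */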
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
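/* Translate guest code starting at tb->pc into a TCG opcode stream.
   When spc is true the pass only rebuilds the gen_opc_* side tables
   used to restore pc/npc after an exception; otherwise it produces a
   new translation block. */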
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;
    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(cpu_env);
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
        if (spc) {
            qemu_log("Search PC...\n");
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                tcg_ctx.gen_opc_pc[lj] = dc->pc;
                gen_opc_npc[lj] = dc->npc;
                tcg_ctx.gen_opc_instr_start[lj] = 1;
                tcg_ctx.gen_opc_icount[lj] = num_insns;
            }
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);
        disas_sparc_insn(dc, insn);
        num_insns++;

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((tcg_ctx.gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
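    /* The loop above stops on a control transfer (is_br), a
       non-sequential PC, a page boundary, single-stepping, opcode
       buffer exhaustion, or when the per-TB instruction budget
       (max_insns) is reached. */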
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (spc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        gen_opc_jump_pc[0] = dc->jump_pc[0];
        gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}
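/* Register the TCG globals backing the SPARC CPU state (PC, condition
   codes, windowed integer registers, FP registers, ...).  Guarded by
   'inited' so the tables are created only once. */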
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1", "g2", "g3", "g4", "g5", "g6", "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, xcc), "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, asi), "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUSPARCState, fprs), "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, gsr), "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUSPARCState, hintp), "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUSPARCState, htba), "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUSPARCState, hver), "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, wim), "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUSPARCState, cond), "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                           offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, psr), "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, fsr), "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUSPARCState, pc), "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, npc), "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, tbr), "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }
    }
}
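/* Recover pc/npc for the instruction at opc index pc_pos, using the
   side tables saved by the search-PC pass; DYNAMIC_PC and JUMP_PC
   encode the non-static npc cases. */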
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong npc;

    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    npc = gen_opc_npc[pc_pos];
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc == JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = gen_opc_jump_pc[0];
        } else {
            env->npc = gen_opc_jump_pc[1];
        }
    } else {
        env->npc = npc;
    }
}