   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
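
/* Note: the pc/npc fields of DisasContext hold either a real target address
   or one of the sentinel values above.  DYNAMIC_PC means the value is only
   known at run time and lives in cpu_pc/cpu_npc; JUMP_PC means npc is one of
   the two jump_pc[] candidates, chosen at run time by the boolean condition
   left in cpu_cond. */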
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int address_mask_32bit;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
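
/* Illustrative examples of the two numbering schemes: GET_FIELD(insn, 2, 6)
   extracts bits 29..25 of the word (bit 0 is the MSB in that scheme), while
   GET_FIELD_SP(insn, 0, 18) extracts the low 19 bits (bits 18..0), matching
   the numbering used in the architecture manuals. */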
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
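
/* Worked example for the DFPREG/QFPREG macros above: on SPARC64 a double or
   quad register number of 32 or more is encoded in the instruction with its
   low bit set, so DFPREG(1) yields 32 (%f32) while DFPREG(2) yields 2 (%f2);
   pre-V9 targets simply mask the odd bit off. */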
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
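
/* The two bits set here correspond to FPRS.DL (lower half, %f0-%f31) and
   FPRS.DU (upper half, %f32-%f62), so guest software can tell which half of
   the FP register file has been dirtied. */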
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
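
/* Single-precision registers are packed two per element of cpu_fpr[]: the
   even-numbered register lives in the upper 32 bits and the odd one in the
   lower 32 bits, which is why the even case above shifts right by 32. */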
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
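
/* Quad-precision values have no TCG representation of their own, so they are
   staged through the qt0/qt1 scratch fields of CPUSPARCState: operands are
   copied in with gen_op_load_fpr_QT*() before calling a helper and the result
   is copied back out with gen_op_store_QT0_fpr(). */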
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}
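
/* Register %g0 always reads as zero and is never written back; the globals
   %g1-%g7 have fixed TCG globals in cpu_gregs[], while the windowed registers
   (%o, %l, %i; regs 8..31) are reached indirectly through cpu_regwptr. */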
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
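
/* Direct block chaining (goto_tb/exit_tb with a tb_num cookie) is only used
   when both pc and npc stay on the same guest page as the current TB, so a
   page-crossing branch always goes back through the main execution loop. */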
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
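
/* Example of the (dst < src) trick: 0xffffffff + 1 wraps to 0 in 32 bits,
   and the unsigned comparison 0 < 0xffffffff then reports the carry out. */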
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
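
/* Condition codes are evaluated lazily: cpu_cc_src/cpu_cc_src2/cpu_cc_dst
   remember the operands and result of the last flag-setting operation, and
   cpu_cc_op (mirrored in dc->cc_op) records which operation it was, so the
   PSR/CCR bits are only materialised when something actually reads them. */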
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
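
/* In other words, one MULScc step: the addend is src2 only when the low bit
   of %y is set, %y is shifted right with the low bit of rs1 entering at the
   top, and rs1 is shifted right with (N xor V) entering at bit 31 before the
   final add that sets the condition codes. */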
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
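
/* Both UMUL and SMUL deposit the upper 32 bits of the 64-bit product in %y;
   the sign_ext flag only selects between zero- and sign-extension of the
   truncated 32-bit operands. */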
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
   FPSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
*/
783 static inline void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
784 unsigned int fcc_offset
)
786 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
787 tcg_gen_andi_tl(reg
, reg
, 0x1);
790 static inline void gen_mov_reg_FCC1(TCGv reg
, TCGv src
,
791 unsigned int fcc_offset
)
793 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
794 tcg_gen_andi_tl(reg
, reg
, 0x1);
798 static inline void gen_op_eval_fbne(TCGv dst
, TCGv src
,
799 unsigned int fcc_offset
)
801 TCGv t0
= tcg_temp_new();
802 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
803 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
804 tcg_gen_or_tl(dst
, dst
, t0
);
808 // 1 or 2: FCC0 ^ FCC1
809 static inline void gen_op_eval_fblg(TCGv dst
, TCGv src
,
810 unsigned int fcc_offset
)
812 TCGv t0
= tcg_temp_new();
813 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
814 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
815 tcg_gen_xor_tl(dst
, dst
, t0
);
820 static inline void gen_op_eval_fbul(TCGv dst
, TCGv src
,
821 unsigned int fcc_offset
)
823 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
827 static inline void gen_op_eval_fbl(TCGv dst
, TCGv src
,
828 unsigned int fcc_offset
)
830 TCGv t0
= tcg_temp_new();
831 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
832 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
833 tcg_gen_andc_tl(dst
, dst
, t0
);
838 static inline void gen_op_eval_fbug(TCGv dst
, TCGv src
,
839 unsigned int fcc_offset
)
841 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
845 static inline void gen_op_eval_fbg(TCGv dst
, TCGv src
,
846 unsigned int fcc_offset
)
848 TCGv t0
= tcg_temp_new();
849 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
850 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
851 tcg_gen_andc_tl(dst
, t0
, dst
);
856 static inline void gen_op_eval_fbu(TCGv dst
, TCGv src
,
857 unsigned int fcc_offset
)
859 TCGv t0
= tcg_temp_new();
860 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
861 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
862 tcg_gen_and_tl(dst
, dst
, t0
);
867 static inline void gen_op_eval_fbe(TCGv dst
, TCGv src
,
868 unsigned int fcc_offset
)
870 TCGv t0
= tcg_temp_new();
871 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
872 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
873 tcg_gen_or_tl(dst
, dst
, t0
);
874 tcg_gen_xori_tl(dst
, dst
, 0x1);
878 // 0 or 3: !(FCC0 ^ FCC1)
879 static inline void gen_op_eval_fbue(TCGv dst
, TCGv src
,
880 unsigned int fcc_offset
)
882 TCGv t0
= tcg_temp_new();
883 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
884 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
885 tcg_gen_xor_tl(dst
, dst
, t0
);
886 tcg_gen_xori_tl(dst
, dst
, 0x1);
891 static inline void gen_op_eval_fbge(TCGv dst
, TCGv src
,
892 unsigned int fcc_offset
)
894 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
895 tcg_gen_xori_tl(dst
, dst
, 0x1);
898 // !1: !(FCC0 & !FCC1)
899 static inline void gen_op_eval_fbuge(TCGv dst
, TCGv src
,
900 unsigned int fcc_offset
)
902 TCGv t0
= tcg_temp_new();
903 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
904 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
905 tcg_gen_andc_tl(dst
, dst
, t0
);
906 tcg_gen_xori_tl(dst
, dst
, 0x1);
911 static inline void gen_op_eval_fble(TCGv dst
, TCGv src
,
912 unsigned int fcc_offset
)
914 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
915 tcg_gen_xori_tl(dst
, dst
, 0x1);
918 // !2: !(!FCC0 & FCC1)
919 static inline void gen_op_eval_fbule(TCGv dst
, TCGv src
,
920 unsigned int fcc_offset
)
922 TCGv t0
= tcg_temp_new();
923 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
924 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
925 tcg_gen_andc_tl(dst
, t0
, dst
);
926 tcg_gen_xori_tl(dst
, dst
, 0x1);
930 // !3: !(FCC0 & FCC1)
931 static inline void gen_op_eval_fbo(TCGv dst
, TCGv src
,
932 unsigned int fcc_offset
)
934 TCGv t0
= tcg_temp_new();
935 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
936 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
937 tcg_gen_and_tl(dst
, dst
, t0
);
938 tcg_gen_xori_tl(dst
, dst
, 0x1);
942 static inline void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
943 target_ulong pc2
, TCGv r_cond
)
945 TCGLabel
*l1
= gen_new_label();
947 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
949 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
952 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
955 static void gen_branch_a(DisasContext
*dc
, target_ulong pc1
)
957 TCGLabel
*l1
= gen_new_label();
958 target_ulong npc
= dc
->npc
;
960 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cond
, 0, l1
);
962 gen_goto_tb(dc
, 0, npc
, pc1
);
965 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
970 static void gen_branch_n(DisasContext
*dc
, target_ulong pc1
)
972 target_ulong npc
= dc
->npc
;
974 if (likely(npc
!= DYNAMIC_PC
)) {
976 dc
->jump_pc
[0] = pc1
;
977 dc
->jump_pc
[1] = npc
+ 4;
982 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
984 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
985 t
= tcg_const_tl(pc1
);
987 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, z
, t
, cpu_npc
);
995 static inline void gen_generic_branch(DisasContext
*dc
)
997 TCGv npc0
= tcg_const_tl(dc
->jump_pc
[0]);
998 TCGv npc1
= tcg_const_tl(dc
->jump_pc
[1]);
999 TCGv zero
= tcg_const_tl(0);
1001 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1003 tcg_temp_free(npc0
);
1004 tcg_temp_free(npc1
);
1005 tcg_temp_free(zero
);
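
/* SPARC branches have architectural delay slots, so the translator tracks
   both pc and npc.  A conditional branch leaves npc as JUMP_PC with the two
   candidate targets in jump_pc[]; gen_generic_branch() resolves that choice
   at run time from the boolean left in cpu_cond. */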
1008 /* call this function before using the condition register as it may
1009 have been set for a jump */
1010 static inline void flush_cond(DisasContext
*dc
)
1012 if (dc
->npc
== JUMP_PC
) {
1013 gen_generic_branch(dc
);
1014 dc
->npc
= DYNAMIC_PC
;
1018 static inline void save_npc(DisasContext
*dc
)
1020 if (dc
->npc
== JUMP_PC
) {
1021 gen_generic_branch(dc
);
1022 dc
->npc
= DYNAMIC_PC
;
1023 } else if (dc
->npc
!= DYNAMIC_PC
) {
1024 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1028 static inline void update_psr(DisasContext
*dc
)
1030 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1031 dc
->cc_op
= CC_OP_FLAGS
;
1032 gen_helper_compute_psr(cpu_env
);
1036 static inline void save_state(DisasContext
*dc
)
1038 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1042 static inline void gen_mov_pc_npc(DisasContext
*dc
)
1044 if (dc
->npc
== JUMP_PC
) {
1045 gen_generic_branch(dc
);
1046 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1047 dc
->pc
= DYNAMIC_PC
;
1048 } else if (dc
->npc
== DYNAMIC_PC
) {
1049 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1050 dc
->pc
= DYNAMIC_PC
;
1056 static inline void gen_op_next_insn(void)
1058 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1059 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1062 static void free_compare(DisasCompare
*cmp
)
1065 tcg_temp_free(cmp
->c1
);
1068 tcg_temp_free(cmp
->c2
);
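
/* A DisasCompare describes a condition as cond(c1, c2).  is_bool means c1
   already holds a 0/1 value (to be tested with TCG_COND_NE against zero),
   and g1/g2 mark c1/c2 as globals that free_compare() must not release. */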
1072 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1075 static int subcc_cond
[16] = {
1091 -1, /* no overflow */
1094 static int logic_cond
[16] = {
1096 TCG_COND_EQ
, /* eq: Z */
1097 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1098 TCG_COND_LT
, /* lt: N ^ V -> N */
1099 TCG_COND_EQ
, /* leu: C | Z -> Z */
1100 TCG_COND_NEVER
, /* ltu: C -> 0 */
1101 TCG_COND_LT
, /* neg: N */
1102 TCG_COND_NEVER
, /* vs: V -> 0 */
1104 TCG_COND_NE
, /* ne: !Z */
1105 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1106 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1107 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1108 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1109 TCG_COND_GE
, /* pos: !N */
1110 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1116 #ifdef TARGET_SPARC64
1126 switch (dc
->cc_op
) {
1128 cmp
->cond
= logic_cond
[cond
];
1130 cmp
->is_bool
= false;
1132 cmp
->c2
= tcg_const_tl(0);
1133 #ifdef TARGET_SPARC64
1136 cmp
->c1
= tcg_temp_new();
1137 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1142 cmp
->c1
= cpu_cc_dst
;
1149 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1150 goto do_compare_dst_0
;
1152 case 7: /* overflow */
1153 case 15: /* !overflow */
1157 cmp
->cond
= subcc_cond
[cond
];
1158 cmp
->is_bool
= false;
1159 #ifdef TARGET_SPARC64
1161 /* Note that sign-extension works for unsigned compares as
1162 long as both operands are sign-extended. */
1163 cmp
->g1
= cmp
->g2
= false;
1164 cmp
->c1
= tcg_temp_new();
1165 cmp
->c2
= tcg_temp_new();
1166 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1167 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1171 cmp
->g1
= cmp
->g2
= true;
1172 cmp
->c1
= cpu_cc_src
;
1173 cmp
->c2
= cpu_cc_src2
;
1180 gen_helper_compute_psr(cpu_env
);
1181 dc
->cc_op
= CC_OP_FLAGS
;
1185 /* We're going to generate a boolean result. */
1186 cmp
->cond
= TCG_COND_NE
;
1187 cmp
->is_bool
= true;
1188 cmp
->g1
= cmp
->g2
= false;
1189 cmp
->c1
= r_dst
= tcg_temp_new();
1190 cmp
->c2
= tcg_const_tl(0);
1194 gen_op_eval_bn(r_dst
);
1197 gen_op_eval_be(r_dst
, r_src
);
1200 gen_op_eval_ble(r_dst
, r_src
);
1203 gen_op_eval_bl(r_dst
, r_src
);
1206 gen_op_eval_bleu(r_dst
, r_src
);
1209 gen_op_eval_bcs(r_dst
, r_src
);
1212 gen_op_eval_bneg(r_dst
, r_src
);
1215 gen_op_eval_bvs(r_dst
, r_src
);
1218 gen_op_eval_ba(r_dst
);
1221 gen_op_eval_bne(r_dst
, r_src
);
1224 gen_op_eval_bg(r_dst
, r_src
);
1227 gen_op_eval_bge(r_dst
, r_src
);
1230 gen_op_eval_bgu(r_dst
, r_src
);
1233 gen_op_eval_bcc(r_dst
, r_src
);
1236 gen_op_eval_bpos(r_dst
, r_src
);
1239 gen_op_eval_bvc(r_dst
, r_src
);
1246 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1248 unsigned int offset
;
1251 /* For now we still generate a straight boolean result. */
1252 cmp
->cond
= TCG_COND_NE
;
1253 cmp
->is_bool
= true;
1254 cmp
->g1
= cmp
->g2
= false;
1255 cmp
->c1
= r_dst
= tcg_temp_new();
1256 cmp
->c2
= tcg_const_tl(0);
1276 gen_op_eval_bn(r_dst
);
1279 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1282 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1285 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1288 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1291 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1294 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1297 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1300 gen_op_eval_ba(r_dst
);
1303 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1306 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1309 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1312 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1315 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1318 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1321 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1326 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1330 gen_compare(&cmp
, cc
, cond
, dc
);
1332 /* The interface is to return a boolean in r_dst. */
1334 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1336 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1342 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1345 gen_fcompare(&cmp
, cc
, cond
);
1347 /* The interface is to return a boolean in r_dst. */
1349 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1351 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1357 #ifdef TARGET_SPARC64
1359 static const int gen_tcg_cond_reg
[8] = {
1370 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1372 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1373 cmp
->is_bool
= false;
1377 cmp
->c2
= tcg_const_tl(0);
1380 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1383 gen_compare_reg(&cmp
, cond
, r_src
);
1385 /* The interface is to return a boolean in r_dst. */
1386 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1392 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1394 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1395 target_ulong target
= dc
->pc
+ offset
;
1397 #ifdef TARGET_SPARC64
1398 if (unlikely(AM_CHECK(dc
))) {
1399 target
&= 0xffffffffULL
;
1403 /* unconditional not taken */
1405 dc
->pc
= dc
->npc
+ 4;
1406 dc
->npc
= dc
->pc
+ 4;
1409 dc
->npc
= dc
->pc
+ 4;
1411 } else if (cond
== 0x8) {
1412 /* unconditional taken */
1415 dc
->npc
= dc
->pc
+ 4;
1419 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1423 gen_cond(cpu_cond
, cc
, cond
, dc
);
1425 gen_branch_a(dc
, target
);
1427 gen_branch_n(dc
, target
);
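
/* The annul bit selects between the two epilogues: gen_branch_a() arranges
   for the delay slot to execute only when the branch is taken, while
   gen_branch_n() leaves npc conditional and always executes the delay slot. */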
1432 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1434 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1435 target_ulong target
= dc
->pc
+ offset
;
1437 #ifdef TARGET_SPARC64
1438 if (unlikely(AM_CHECK(dc
))) {
1439 target
&= 0xffffffffULL
;
1443 /* unconditional not taken */
1445 dc
->pc
= dc
->npc
+ 4;
1446 dc
->npc
= dc
->pc
+ 4;
1449 dc
->npc
= dc
->pc
+ 4;
1451 } else if (cond
== 0x8) {
1452 /* unconditional taken */
1455 dc
->npc
= dc
->pc
+ 4;
1459 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1463 gen_fcond(cpu_cond
, cc
, cond
);
1465 gen_branch_a(dc
, target
);
1467 gen_branch_n(dc
, target
);
1472 #ifdef TARGET_SPARC64
1473 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1476 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1477 target_ulong target
= dc
->pc
+ offset
;
1479 if (unlikely(AM_CHECK(dc
))) {
1480 target
&= 0xffffffffULL
;
1483 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1485 gen_branch_a(dc
, target
);
1487 gen_branch_n(dc
, target
);
1491 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1495 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1498 gen_helper_fcmps_fcc1(cpu_env
, r_rs1
, r_rs2
);
1501 gen_helper_fcmps_fcc2(cpu_env
, r_rs1
, r_rs2
);
1504 gen_helper_fcmps_fcc3(cpu_env
, r_rs1
, r_rs2
);
1509 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1513 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1516 gen_helper_fcmpd_fcc1(cpu_env
, r_rs1
, r_rs2
);
1519 gen_helper_fcmpd_fcc2(cpu_env
, r_rs1
, r_rs2
);
1522 gen_helper_fcmpd_fcc3(cpu_env
, r_rs1
, r_rs2
);
1527 static inline void gen_op_fcmpq(int fccno
)
1531 gen_helper_fcmpq(cpu_env
);
1534 gen_helper_fcmpq_fcc1(cpu_env
);
1537 gen_helper_fcmpq_fcc2(cpu_env
);
1540 gen_helper_fcmpq_fcc3(cpu_env
);
1545 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1549 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1552 gen_helper_fcmpes_fcc1(cpu_env
, r_rs1
, r_rs2
);
1555 gen_helper_fcmpes_fcc2(cpu_env
, r_rs1
, r_rs2
);
1558 gen_helper_fcmpes_fcc3(cpu_env
, r_rs1
, r_rs2
);
1563 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1567 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1570 gen_helper_fcmped_fcc1(cpu_env
, r_rs1
, r_rs2
);
1573 gen_helper_fcmped_fcc2(cpu_env
, r_rs1
, r_rs2
);
1576 gen_helper_fcmped_fcc3(cpu_env
, r_rs1
, r_rs2
);
1581 static inline void gen_op_fcmpeq(int fccno
)
1585 gen_helper_fcmpeq(cpu_env
);
1588 gen_helper_fcmpeq_fcc1(cpu_env
);
1591 gen_helper_fcmpeq_fcc2(cpu_env
);
1594 gen_helper_fcmpeq_fcc3(cpu_env
);
1601 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1603 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1606 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1608 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1611 static inline void gen_op_fcmpq(int fccno
)
1613 gen_helper_fcmpq(cpu_env
);
1616 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1618 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1621 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1623 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1626 static inline void gen_op_fcmpeq(int fccno
)
1628 gen_helper_fcmpeq(cpu_env
);
1632 static inline void gen_op_fpexception_im(int fsr_flags
)
1636 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1637 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1638 r_const
= tcg_const_i32(TT_FP_EXCP
);
1639 gen_helper_raise_exception(cpu_env
, r_const
);
1640 tcg_temp_free_i32(r_const
);
1643 static int gen_trap_ifnofpu(DisasContext
*dc
)
1645 #if !defined(CONFIG_USER_ONLY)
1646 if (!dc
->fpu_enabled
) {
1650 r_const
= tcg_const_i32(TT_NFPU_INSN
);
1651 gen_helper_raise_exception(cpu_env
, r_const
);
1652 tcg_temp_free_i32(r_const
);
1660 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1662 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1665 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1666 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1670 src
= gen_load_fpr_F(dc
, rs
);
1671 dst
= gen_dest_fpr_F(dc
);
1673 gen(dst
, cpu_env
, src
);
1675 gen_store_fpr_F(dc
, rd
, dst
);
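
/* The gen_*fop_* wrappers in this section all follow the pattern just shown:
   load the source registers, pick a destination, invoke the supplied
   generator (passing cpu_env for the variants that can raise FP exceptions),
   and store the result back, which also marks the FPRS dirty bits. */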
1678 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1679 void (*gen
)(TCGv_i32
, TCGv_i32
))
1683 src
= gen_load_fpr_F(dc
, rs
);
1684 dst
= gen_dest_fpr_F(dc
);
1688 gen_store_fpr_F(dc
, rd
, dst
);
1691 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1692 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1694 TCGv_i32 dst
, src1
, src2
;
1696 src1
= gen_load_fpr_F(dc
, rs1
);
1697 src2
= gen_load_fpr_F(dc
, rs2
);
1698 dst
= gen_dest_fpr_F(dc
);
1700 gen(dst
, cpu_env
, src1
, src2
);
1702 gen_store_fpr_F(dc
, rd
, dst
);
1705 #ifdef TARGET_SPARC64
1706 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1707 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1709 TCGv_i32 dst
, src1
, src2
;
1711 src1
= gen_load_fpr_F(dc
, rs1
);
1712 src2
= gen_load_fpr_F(dc
, rs2
);
1713 dst
= gen_dest_fpr_F(dc
);
1715 gen(dst
, src1
, src2
);
1717 gen_store_fpr_F(dc
, rd
, dst
);
1721 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1722 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1726 src
= gen_load_fpr_D(dc
, rs
);
1727 dst
= gen_dest_fpr_D(dc
, rd
);
1729 gen(dst
, cpu_env
, src
);
1731 gen_store_fpr_D(dc
, rd
, dst
);
1734 #ifdef TARGET_SPARC64
1735 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1736 void (*gen
)(TCGv_i64
, TCGv_i64
))
1740 src
= gen_load_fpr_D(dc
, rs
);
1741 dst
= gen_dest_fpr_D(dc
, rd
);
1745 gen_store_fpr_D(dc
, rd
, dst
);
1749 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1750 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1752 TCGv_i64 dst
, src1
, src2
;
1754 src1
= gen_load_fpr_D(dc
, rs1
);
1755 src2
= gen_load_fpr_D(dc
, rs2
);
1756 dst
= gen_dest_fpr_D(dc
, rd
);
1758 gen(dst
, cpu_env
, src1
, src2
);
1760 gen_store_fpr_D(dc
, rd
, dst
);
1763 #ifdef TARGET_SPARC64
1764 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1765 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1767 TCGv_i64 dst
, src1
, src2
;
1769 src1
= gen_load_fpr_D(dc
, rs1
);
1770 src2
= gen_load_fpr_D(dc
, rs2
);
1771 dst
= gen_dest_fpr_D(dc
, rd
);
1773 gen(dst
, src1
, src2
);
1775 gen_store_fpr_D(dc
, rd
, dst
);
1778 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1779 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1781 TCGv_i64 dst
, src1
, src2
;
1783 src1
= gen_load_fpr_D(dc
, rs1
);
1784 src2
= gen_load_fpr_D(dc
, rs2
);
1785 dst
= gen_dest_fpr_D(dc
, rd
);
1787 gen(dst
, cpu_gsr
, src1
, src2
);
1789 gen_store_fpr_D(dc
, rd
, dst
);
1792 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1793 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1795 TCGv_i64 dst
, src0
, src1
, src2
;
1797 src1
= gen_load_fpr_D(dc
, rs1
);
1798 src2
= gen_load_fpr_D(dc
, rs2
);
1799 src0
= gen_load_fpr_D(dc
, rd
);
1800 dst
= gen_dest_fpr_D(dc
, rd
);
1802 gen(dst
, src0
, src1
, src2
);
1804 gen_store_fpr_D(dc
, rd
, dst
);
1808 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1809 void (*gen
)(TCGv_ptr
))
1811 gen_op_load_fpr_QT1(QFPREG(rs
));
1815 gen_op_store_QT0_fpr(QFPREG(rd
));
1816 gen_update_fprs_dirty(QFPREG(rd
));
1819 #ifdef TARGET_SPARC64
1820 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1821 void (*gen
)(TCGv_ptr
))
1823 gen_op_load_fpr_QT1(QFPREG(rs
));
1827 gen_op_store_QT0_fpr(QFPREG(rd
));
1828 gen_update_fprs_dirty(QFPREG(rd
));
1832 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1833 void (*gen
)(TCGv_ptr
))
1835 gen_op_load_fpr_QT0(QFPREG(rs1
));
1836 gen_op_load_fpr_QT1(QFPREG(rs2
));
1840 gen_op_store_QT0_fpr(QFPREG(rd
));
1841 gen_update_fprs_dirty(QFPREG(rd
));
1844 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1845 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1848 TCGv_i32 src1
, src2
;
1850 src1
= gen_load_fpr_F(dc
, rs1
);
1851 src2
= gen_load_fpr_F(dc
, rs2
);
1852 dst
= gen_dest_fpr_D(dc
, rd
);
1854 gen(dst
, cpu_env
, src1
, src2
);
1856 gen_store_fpr_D(dc
, rd
, dst
);
1859 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1860 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1862 TCGv_i64 src1
, src2
;
1864 src1
= gen_load_fpr_D(dc
, rs1
);
1865 src2
= gen_load_fpr_D(dc
, rs2
);
1867 gen(cpu_env
, src1
, src2
);
1869 gen_op_store_QT0_fpr(QFPREG(rd
));
1870 gen_update_fprs_dirty(QFPREG(rd
));
1873 #ifdef TARGET_SPARC64
1874 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1875 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1880 src
= gen_load_fpr_F(dc
, rs
);
1881 dst
= gen_dest_fpr_D(dc
, rd
);
1883 gen(dst
, cpu_env
, src
);
1885 gen_store_fpr_D(dc
, rd
, dst
);
1889 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1890 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1895 src
= gen_load_fpr_F(dc
, rs
);
1896 dst
= gen_dest_fpr_D(dc
, rd
);
1898 gen(dst
, cpu_env
, src
);
1900 gen_store_fpr_D(dc
, rd
, dst
);
1903 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1904 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1909 src
= gen_load_fpr_D(dc
, rs
);
1910 dst
= gen_dest_fpr_F(dc
);
1912 gen(dst
, cpu_env
, src
);
1914 gen_store_fpr_F(dc
, rd
, dst
);
1917 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1918 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1922 gen_op_load_fpr_QT1(QFPREG(rs
));
1923 dst
= gen_dest_fpr_F(dc
);
1927 gen_store_fpr_F(dc
, rd
, dst
);
1930 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1931 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1935 gen_op_load_fpr_QT1(QFPREG(rs
));
1936 dst
= gen_dest_fpr_D(dc
, rd
);
1940 gen_store_fpr_D(dc
, rd
, dst
);
1943 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1944 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1948 src
= gen_load_fpr_F(dc
, rs
);
1952 gen_op_store_QT0_fpr(QFPREG(rd
));
1953 gen_update_fprs_dirty(QFPREG(rd
));
1956 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1957 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1961 src
= gen_load_fpr_D(dc
, rs
);
1965 gen_op_store_QT0_fpr(QFPREG(rd
));
1966 gen_update_fprs_dirty(QFPREG(rd
));
1970 #ifdef TARGET_SPARC64
1971 static inline TCGv_i32
gen_get_asi(int insn
, TCGv r_addr
)
1977 r_asi
= tcg_temp_new_i32();
1978 tcg_gen_mov_i32(r_asi
, cpu_asi
);
1980 asi
= GET_FIELD(insn
, 19, 26);
1981 r_asi
= tcg_const_i32(asi
);
1986 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
1989 TCGv_i32 r_asi
, r_size
, r_sign
;
1991 r_asi
= gen_get_asi(insn
, addr
);
1992 r_size
= tcg_const_i32(size
);
1993 r_sign
= tcg_const_i32(sign
);
1994 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
1995 tcg_temp_free_i32(r_sign
);
1996 tcg_temp_free_i32(r_size
);
1997 tcg_temp_free_i32(r_asi
);
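
/* Alternate-space accesses funnel through the ld_asi/st_asi helpers; the ASI
   comes either from the %asi register (immediate form, i bit set) or from the
   instruction's asi field, which is what gen_get_asi() selects above. */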
2000 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2002 TCGv_i32 r_asi
, r_size
;
2004 r_asi
= gen_get_asi(insn
, addr
);
2005 r_size
= tcg_const_i32(size
);
2006 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2007 tcg_temp_free_i32(r_size
);
2008 tcg_temp_free_i32(r_asi
);
2011 static inline void gen_ldf_asi(TCGv addr
, int insn
, int size
, int rd
)
2013 TCGv_i32 r_asi
, r_size
, r_rd
;
2015 r_asi
= gen_get_asi(insn
, addr
);
2016 r_size
= tcg_const_i32(size
);
2017 r_rd
= tcg_const_i32(rd
);
2018 gen_helper_ldf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2019 tcg_temp_free_i32(r_rd
);
2020 tcg_temp_free_i32(r_size
);
2021 tcg_temp_free_i32(r_asi
);
2024 static inline void gen_stf_asi(TCGv addr
, int insn
, int size
, int rd
)
2026 TCGv_i32 r_asi
, r_size
, r_rd
;
2028 r_asi
= gen_get_asi(insn
, addr
);
2029 r_size
= tcg_const_i32(size
);
2030 r_rd
= tcg_const_i32(rd
);
2031 gen_helper_stf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2032 tcg_temp_free_i32(r_rd
);
2033 tcg_temp_free_i32(r_size
);
2034 tcg_temp_free_i32(r_asi
);
2037 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2039 TCGv_i32 r_asi
, r_size
, r_sign
;
2040 TCGv_i64 t64
= tcg_temp_new_i64();
2042 r_asi
= gen_get_asi(insn
, addr
);
2043 r_size
= tcg_const_i32(4);
2044 r_sign
= tcg_const_i32(0);
2045 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2046 tcg_temp_free_i32(r_sign
);
2047 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2048 tcg_temp_free_i32(r_size
);
2049 tcg_temp_free_i32(r_asi
);
2050 tcg_gen_trunc_i64_tl(dst
, t64
);
2051 tcg_temp_free_i64(t64
);
2054 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2057 TCGv_i32 r_asi
, r_rd
;
2059 r_asi
= gen_get_asi(insn
, addr
);
2060 r_rd
= tcg_const_i32(rd
);
2061 gen_helper_ldda_asi(cpu_env
, addr
, r_asi
, r_rd
);
2062 tcg_temp_free_i32(r_rd
);
2063 tcg_temp_free_i32(r_asi
);
2066 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2069 TCGv_i32 r_asi
, r_size
;
2070 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2071 TCGv_i64 t64
= tcg_temp_new_i64();
2073 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2074 r_asi
= gen_get_asi(insn
, addr
);
2075 r_size
= tcg_const_i32(8);
2076 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2077 tcg_temp_free_i32(r_size
);
2078 tcg_temp_free_i32(r_asi
);
2079 tcg_temp_free_i64(t64
);
2082 static inline void gen_casx_asi(DisasContext
*dc
, TCGv addr
,
2083 TCGv val2
, int insn
, int rd
)
2085 TCGv val1
= gen_load_gpr(dc
, rd
);
2086 TCGv dst
= gen_dest_gpr(dc
, rd
);
2087 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2089 gen_helper_casx_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2090 tcg_temp_free_i32(r_asi
);
2091 gen_store_gpr(dc
, rd
, dst
);
2094 #elif !defined(CONFIG_USER_ONLY)
2096 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
2099 TCGv_i32 r_asi
, r_size
, r_sign
;
2100 TCGv_i64 t64
= tcg_temp_new_i64();
2102 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2103 r_size
= tcg_const_i32(size
);
2104 r_sign
= tcg_const_i32(sign
);
2105 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2106 tcg_temp_free_i32(r_sign
);
2107 tcg_temp_free_i32(r_size
);
2108 tcg_temp_free_i32(r_asi
);
2109 tcg_gen_trunc_i64_tl(dst
, t64
);
2110 tcg_temp_free_i64(t64
);
2113 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2115 TCGv_i32 r_asi
, r_size
;
2116 TCGv_i64 t64
= tcg_temp_new_i64();
2118 tcg_gen_extu_tl_i64(t64
, src
);
2119 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2120 r_size
= tcg_const_i32(size
);
2121 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2122 tcg_temp_free_i32(r_size
);
2123 tcg_temp_free_i32(r_asi
);
2124 tcg_temp_free_i64(t64
);
2127 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2129 TCGv_i32 r_asi
, r_size
, r_sign
;
2130 TCGv_i64 r_val
, t64
;
2132 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2133 r_size
= tcg_const_i32(4);
2134 r_sign
= tcg_const_i32(0);
2135 t64
= tcg_temp_new_i64();
2136 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2137 tcg_temp_free(r_sign
);
2138 r_val
= tcg_temp_new_i64();
2139 tcg_gen_extu_tl_i64(r_val
, src
);
2140 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2141 tcg_temp_free_i64(r_val
);
2142 tcg_temp_free_i32(r_size
);
2143 tcg_temp_free_i32(r_asi
);
2144 tcg_gen_trunc_i64_tl(dst
, t64
);
2145 tcg_temp_free_i64(t64
);
2148 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2151 TCGv_i32 r_asi
, r_size
, r_sign
;
2155 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2156 r_size
= tcg_const_i32(8);
2157 r_sign
= tcg_const_i32(0);
2158 t64
= tcg_temp_new_i64();
2159 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2160 tcg_temp_free_i32(r_sign
);
2161 tcg_temp_free_i32(r_size
);
2162 tcg_temp_free_i32(r_asi
);
2164 t
= gen_dest_gpr(dc
, rd
+ 1);
2165 tcg_gen_trunc_i64_tl(t
, t64
);
2166 gen_store_gpr(dc
, rd
+ 1, t
);
2168 tcg_gen_shri_i64(t64
, t64
, 32);
2169 tcg_gen_trunc_i64_tl(hi
, t64
);
2170 tcg_temp_free_i64(t64
);
2171 gen_store_gpr(dc
, rd
, hi
);
2174 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2177 TCGv_i32 r_asi
, r_size
;
2178 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2179 TCGv_i64 t64
= tcg_temp_new_i64();
2181 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2182 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2183 r_size
= tcg_const_i32(8);
2184 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2185 tcg_temp_free_i32(r_size
);
2186 tcg_temp_free_i32(r_asi
);
2187 tcg_temp_free_i64(t64
);
2191 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2192 static inline void gen_cas_asi(DisasContext
*dc
, TCGv addr
,
2193 TCGv val2
, int insn
, int rd
)
2195 TCGv val1
= gen_load_gpr(dc
, rd
);
2196 TCGv dst
= gen_dest_gpr(dc
, rd
);
2197 #ifdef TARGET_SPARC64
2198 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2200 TCGv_i32 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2203 gen_helper_cas_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2204 tcg_temp_free_i32(r_asi
);
2205 gen_store_gpr(dc
, rd
, dst
);
2208 static inline void gen_ldstub_asi(TCGv dst
, TCGv addr
, int insn
)
2211 TCGv_i32 r_asi
, r_size
;
2213 gen_ld_asi(dst
, addr
, insn
, 1, 0);
2215 r_val
= tcg_const_i64(0xffULL
);
2216 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2217 r_size
= tcg_const_i32(1);
2218 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2219 tcg_temp_free_i32(r_size
);
2220 tcg_temp_free_i32(r_asi
);
2221 tcg_temp_free_i64(r_val
);
2225 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2227 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2228 return gen_load_gpr(dc
, rs1
);
2231 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2233 if (IS_IMM
) { /* immediate */
2234 target_long simm
= GET_FIELDs(insn
, 19, 31);
2235 TCGv t
= get_temp_tl(dc
);
2236 tcg_gen_movi_tl(t
, simm
);
2238 } else { /* register */
2239 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2240 return gen_load_gpr(dc
, rs2
);
2244 #ifdef TARGET_SPARC64
2245 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2247 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2249 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2250 or fold the comparison down to 32 bits and use movcond_i32. Choose
2252 c32
= tcg_temp_new_i32();
2254 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2256 TCGv_i64 c64
= tcg_temp_new_i64();
2257 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2258 tcg_gen_extrl_i64_i32(c32
, c64
);
2259 tcg_temp_free_i64(c64
);
2262 s1
= gen_load_fpr_F(dc
, rs
);
2263 s2
= gen_load_fpr_F(dc
, rd
);
2264 dst
= gen_dest_fpr_F(dc
);
2265 zero
= tcg_const_i32(0);
2267 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2269 tcg_temp_free_i32(c32
);
2270 tcg_temp_free_i32(zero
);
2271 gen_store_fpr_F(dc
, rd
, dst
);
2274 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2276 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2277 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2278 gen_load_fpr_D(dc
, rs
),
2279 gen_load_fpr_D(dc
, rd
));
2280 gen_store_fpr_D(dc
, rd
, dst
);
2283 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2285 int qd
= QFPREG(rd
);
2286 int qs
= QFPREG(rs
);
2288 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2289 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2290 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2291 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2293 gen_update_fprs_dirty(qd
);
2296 #ifndef CONFIG_USER_ONLY
2297 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_ptr cpu_env
)
2299 TCGv_i32 r_tl
= tcg_temp_new_i32();
2301 /* load env->tl into r_tl */
2302 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2304 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2305 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2307 /* calculate offset to current trap state from env->ts, reuse r_tl */
2308 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2309 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2311 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2313 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2314 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2315 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2316 tcg_temp_free_ptr(r_tl_tmp
);
2319 tcg_temp_free_i32(r_tl
);
2323 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2324 int width
, bool cc
, bool left
)
2326 TCGv lo1
, lo2
, t1
, t2
;
2327 uint64_t amask
, tabl
, tabr
;
2328 int shift
, imask
, omask
;
2331 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2332 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2333 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2334 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2335 dc
->cc_op
= CC_OP_SUB
;
    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
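
    /* Purely as an arithmetic illustration for the 8-byte case: with left=1
       and (s1 & 7) == 5, index = 5 << 3 = 40 and (TABL >> 40) & 0xff = 0xe0,
       the partial mask for that alignment; the right-hand operand is looked
       up in TABR the same way. */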
2351 tabl
= 0x80c0e0f0f8fcfeffULL
;
2352 tabr
= 0xff7f3f1f0f070301ULL
;
2354 tabl
= 0x0103070f1f3f7fffULL
;
2355 tabr
= 0xfffefcf8f0e0c080ULL
;
2375 tabl
= (2 << 2) | 3;
2376 tabr
= (3 << 2) | 1;
2378 tabl
= (1 << 2) | 3;
2379 tabr
= (3 << 2) | 2;
2386 lo1
= tcg_temp_new();
2387 lo2
= tcg_temp_new();
2388 tcg_gen_andi_tl(lo1
, s1
, imask
);
2389 tcg_gen_andi_tl(lo2
, s2
, imask
);
2390 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2391 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2393 t1
= tcg_const_tl(tabl
);
2394 t2
= tcg_const_tl(tabr
);
2395 tcg_gen_shr_tl(lo1
, t1
, lo1
);
2396 tcg_gen_shr_tl(lo2
, t2
, lo2
);
2397 tcg_gen_andi_tl(dst
, lo1
, omask
);
2398 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2402 amask
&= 0xffffffffULL
;
2404 tcg_gen_andi_tl(s1
, s1
, amask
);
2405 tcg_gen_andi_tl(s2
, s2
, amask
);
2407 /* We want to compute
2408 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2409 We've already done dst = lo1, so this reduces to
2410 dst &= (s1 == s2 ? -1 : lo2)
2415 tcg_gen_setcond_tl(TCG_COND_EQ
, t1
, s1
, s2
);
2416 tcg_gen_neg_tl(t1
, t1
);
2417 tcg_gen_or_tl(lo2
, lo2
, t1
);
2418 tcg_gen_and_tl(dst
, dst
, lo2
);
2426 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2428 TCGv tmp
= tcg_temp_new();
2430 tcg_gen_add_tl(tmp
, s1
, s2
);
2431 tcg_gen_andi_tl(dst
, tmp
, -8);
2433 tcg_gen_neg_tl(tmp
, tmp
);
2435 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2440 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2444 t1
= tcg_temp_new();
2445 t2
= tcg_temp_new();
2446 shift
= tcg_temp_new();
2448 tcg_gen_andi_tl(shift
, gsr
, 7);
2449 tcg_gen_shli_tl(shift
, shift
, 3);
2450 tcg_gen_shl_tl(t1
, s1
, shift
);
2452 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2453 shift of (up to 63) followed by a constant shift of 1. */
2454 tcg_gen_xori_tl(shift
, shift
, 63);
2455 tcg_gen_shr_tl(t2
, s2
, shift
);
2456 tcg_gen_shri_tl(t2
, t2
, 1);
2458 tcg_gen_or_tl(dst
, t1
, t2
);
2462 tcg_temp_free(shift
);
2466 #define CHECK_IU_FEATURE(dc, FEATURE) \
2467 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2469 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2470 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2473 /* before an instruction, dc->pc must be static */
2474 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2476 unsigned int opc
, rs1
, rs2
, rd
;
2477 TCGv cpu_src1
, cpu_src2
;
2478 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2479 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2482 opc
= GET_FIELD(insn
, 0, 1);
2483 rd
= GET_FIELD(insn
, 2, 6);
2486 case 0: /* branches/sethi */
2488 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2491 #ifdef TARGET_SPARC64
2492 case 0x1: /* V9 BPcc */
2496 target
= GET_FIELD_SP(insn
, 0, 18);
2497 target
= sign_extend(target
, 19);
2499 cc
= GET_FIELD_SP(insn
, 20, 21);
2501 do_branch(dc
, target
, insn
, 0);
2503 do_branch(dc
, target
, insn
, 1);
2508 case 0x3: /* V9 BPr */
2510 target
= GET_FIELD_SP(insn
, 0, 13) |
2511 (GET_FIELD_SP(insn
, 20, 21) << 14);
2512 target
= sign_extend(target
, 16);
2514 cpu_src1
= get_src1(dc
, insn
);
2515 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2518 case 0x5: /* V9 FBPcc */
2520 int cc
= GET_FIELD_SP(insn
, 20, 21);
2521 if (gen_trap_ifnofpu(dc
)) {
2524 target
= GET_FIELD_SP(insn
, 0, 18);
2525 target
= sign_extend(target
, 19);
2527 do_fbranch(dc
, target
, insn
, cc
);
2531 case 0x7: /* CBN+x */
2536 case 0x2: /* BN+x */
2538 target
= GET_FIELD(insn
, 10, 31);
2539 target
= sign_extend(target
, 22);
2541 do_branch(dc
, target
, insn
, 0);
2544 case 0x6: /* FBN+x */
2546 if (gen_trap_ifnofpu(dc
)) {
2549 target
= GET_FIELD(insn
, 10, 31);
2550 target
= sign_extend(target
, 22);
2552 do_fbranch(dc
, target
, insn
, 0);
2555 case 0x4: /* SETHI */
2556 /* Special-case %g0 because that's the canonical nop. */
2558 uint32_t value
= GET_FIELD(insn
, 10, 31);
2559 TCGv t
= gen_dest_gpr(dc
, rd
);
2560 tcg_gen_movi_tl(t
, value
<< 10);
2561 gen_store_gpr(dc
, rd
, t
);
2564 case 0x0: /* UNIMPL */
2573 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2574 TCGv o7
= gen_dest_gpr(dc
, 15);
2576 tcg_gen_movi_tl(o7
, dc
->pc
);
2577 gen_store_gpr(dc
, 15, o7
);
2580 #ifdef TARGET_SPARC64
2581 if (unlikely(AM_CHECK(dc
))) {
2582 target
&= 0xffffffffULL
;
2588 case 2: /* FPU & Logical Operations */
2590 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2591 TCGv cpu_dst
= get_temp_tl(dc
);
2594 if (xop
== 0x3a) { /* generate trap */
2595 int cond
= GET_FIELD(insn
, 3, 6);
2597 TCGLabel
*l1
= NULL
;
2608 /* Conditional trap. */
2610 #ifdef TARGET_SPARC64
2612 int cc
= GET_FIELD_SP(insn
, 11, 12);
2614 gen_compare(&cmp
, 0, cond
, dc
);
2615 } else if (cc
== 2) {
2616 gen_compare(&cmp
, 1, cond
, dc
);
2621 gen_compare(&cmp
, 0, cond
, dc
);
2623 l1
= gen_new_label();
2624 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2625 cmp
.c1
, cmp
.c2
, l1
);
2629 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2630 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2632 /* Don't use the normal temporaries, as they may well have
2633 gone out of scope with the branch above. While we're
2634 doing that we might as well pre-truncate to 32-bit. */
2635 trap
= tcg_temp_new_i32();
2637 rs1
= GET_FIELD_SP(insn
, 14, 18);
2639 rs2
= GET_FIELD_SP(insn
, 0, 6);
2641 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2642 /* Signal that the trap value is fully constant. */
2645 TCGv t1
= gen_load_gpr(dc
, rs1
);
2646 tcg_gen_trunc_tl_i32(trap
, t1
);
2647 tcg_gen_addi_i32(trap
, trap
, rs2
);
2651 rs2
= GET_FIELD_SP(insn
, 0, 4);
2652 t1
= gen_load_gpr(dc
, rs1
);
2653 t2
= gen_load_gpr(dc
, rs2
);
2654 tcg_gen_add_tl(t1
, t1
, t2
);
2655 tcg_gen_trunc_tl_i32(trap
, t1
);
2658 tcg_gen_andi_i32(trap
, trap
, mask
);
2659 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2662 gen_helper_raise_exception(cpu_env
, trap
);
2663 tcg_temp_free_i32(trap
);
2666 /* An unconditional trap ends the TB. */
2670 /* A conditional trap falls through to the next insn. */
        } else if (xop == 0x28) {
            rs1 = GET_FIELD(insn, 13, 17);
#ifndef TARGET_SPARC64
            case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                   manual, rdy on the microSPARC
                                   II */
            case 0x0f:          /* stbar in the SPARCv8 manual,
                                   rdy on the microSPARC II */
            case 0x10 ... 0x1f: /* implementation-dependent in the
                                   SPARCv8 manual, rdy on the
                                   microSPARC II */
                if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                    TCGv t = gen_dest_gpr(dc, rd);
                    /* Read Asr17 for a Leon3 monoprocessor */
                    tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                    gen_store_gpr(dc, rd, t);
                gen_store_gpr(dc, rd, cpu_y);
#ifdef TARGET_SPARC64
            case 0x2: /* V9 rdccr */
                gen_helper_rdccr(cpu_dst, cpu_env);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x3: /* V9 rdasi */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x4: /* V9 rdtick */
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, tick));
                gen_helper_tick_get_count(cpu_dst, r_tickptr);
                tcg_temp_free_ptr(r_tickptr);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x5: /* V9 rdpc */
                TCGv t = gen_dest_gpr(dc, rd);
                if (unlikely(AM_CHECK(dc))) {
                    tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                tcg_gen_movi_tl(t, dc->pc);
                gen_store_gpr(dc, rd, t);
            case 0x6: /* V9 rdfprs */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0xf: /* V9 membar */
                break; /* no effect */
            case 0x13: /* Graphics Status */
                if (gen_trap_ifnofpu(dc)) {
                gen_store_gpr(dc, rd, cpu_gsr);
            case 0x16: /* Softint */
                tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x17: /* Tick compare */
                gen_store_gpr(dc, rd, cpu_tick_cmpr);
            case 0x18: /* System tick */
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, stick));
                gen_helper_tick_get_count(cpu_dst, r_tickptr);
                tcg_temp_free_ptr(r_tickptr);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x19: /* System tick compare */
                gen_store_gpr(dc, rd, cpu_stick_cmpr);
            case 0x10: /* Performance Control */
            case 0x11: /* Performance Instrumentation Counter */
            case 0x12: /* Dispatch Control */
            case 0x14: /* Softint set, WO */
            case 0x15: /* Softint clear, WO */
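            /* The rdtick/rdstick cases above go through a helper rather than a
               plain register move because the counters live behind a timer
               pointer in CPUSPARCState; the helper derives the current count
               from that timer state at execution time. */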
#if !defined(CONFIG_USER_ONLY)
        } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
            if (!supervisor(dc)) {
            gen_helper_rdpsr(cpu_dst, cpu_env);
            CHECK_IU_FEATURE(dc, HYPV);
            if (!hypervisor(dc))
            rs1 = GET_FIELD(insn, 13, 17);
                // gen_op_rdhpstate();
                // gen_op_rdhtstate();
                tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                tcg_gen_mov_tl(cpu_dst, cpu_htba);
                tcg_gen_mov_tl(cpu_dst, cpu_hver);
            case 31: // hstick_cmpr
                tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
            gen_store_gpr(dc, rd, cpu_dst);
        } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
            if (!supervisor(dc)) {
            cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
            rs1 = GET_FIELD(insn, 13, 17);
                r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                              offsetof(trap_state, tpc));
                tcg_temp_free_ptr(r_tsptr);
                r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                              offsetof(trap_state, tnpc));
                tcg_temp_free_ptr(r_tsptr);
                r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                              offsetof(trap_state, tstate));
                tcg_temp_free_ptr(r_tsptr);
                TCGv_ptr r_tsptr = tcg_temp_new_ptr();
                gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                 offsetof(trap_state, tt));
                tcg_temp_free_ptr(r_tsptr);
                r_tickptr = tcg_temp_new_ptr();
                tcg_gen_ld_ptr(r_tickptr, cpu_env,
                               offsetof(CPUSPARCState, tick));
                gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
                tcg_temp_free_ptr(r_tickptr);
                tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, pstate));
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, tl));
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, psrpil));
                gen_helper_rdcwp(cpu_tmp0, cpu_env);
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, cansave));
            case 11: // canrestore
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, canrestore));
            case 12: // cleanwin
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, cleanwin));
            case 13: // otherwin
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, otherwin));
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, wstate));
            case 16: // UA2005 gl
                CHECK_IU_FEATURE(dc, GL);
                tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                 offsetof(CPUSPARCState, gl));
            case 26: // UA2005 strand status
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
            tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
            gen_store_gpr(dc, rd, cpu_tmp0);
        } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
            gen_helper_flushw(cpu_env);
            if (!supervisor(dc))
            gen_store_gpr(dc, rd, cpu_tbr);
        } else if (xop == 0x34) {   /* FPU Operations */
            if (gen_trap_ifnofpu(dc)) {
            gen_op_clear_ieee_excp_and_FTT();
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            xop = GET_FIELD(insn, 18, 26);
            case 0x1: /* fmovs */
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
            case 0x5: /* fnegs */
                gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
            case 0x9: /* fabss */
                gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
            case 0x29: /* fsqrts */
                CHECK_FPU_FEATURE(dc, FSQRT);
                gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
            case 0x2a: /* fsqrtd */
                CHECK_FPU_FEATURE(dc, FSQRT);
                gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
            case 0x2b: /* fsqrtq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
            case 0x41: /* fadds */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
            case 0x42: /* faddd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
            case 0x43: /* faddq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
            case 0x45: /* fsubs */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
            case 0x46: /* fsubd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
            case 0x47: /* fsubq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
            case 0x49: /* fmuls */
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
            case 0x4a: /* fmuld */
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
            case 0x4b: /* fmulq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                CHECK_FPU_FEATURE(dc, FMUL);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
            case 0x4d: /* fdivs */
                gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
            case 0x4e: /* fdivd */
                gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
            case 0x4f: /* fdivq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
            case 0x69: /* fsmuld */
                CHECK_FPU_FEATURE(dc, FSMULD);
                gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
            case 0x6e: /* fdmulq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
            case 0xc4: /* fitos */
                gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
            case 0xc6: /* fdtos */
                gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
            case 0xc7: /* fqtos */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
            case 0xc8: /* fitod */
                gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
            case 0xc9: /* fstod */
                gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
            case 0xcb: /* fqtod */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
            case 0xcc: /* fitoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
            case 0xcd: /* fstoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
            case 0xce: /* fdtoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
            case 0xd1: /* fstoi */
                gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
            case 0xd2: /* fdtoi */
                gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
            case 0xd3: /* fqtoi */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
#ifdef TARGET_SPARC64
            case 0x2: /* V9 fmovd */
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
            case 0x3: /* V9 fmovq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_move_Q(rd, rs2);
            case 0x6: /* V9 fnegd */
                gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
            case 0x7: /* V9 fnegq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
            case 0xa: /* V9 fabsd */
                gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
            case 0xb: /* V9 fabsq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
            case 0x81: /* V9 fstox */
                gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
            case 0x82: /* V9 fdtox */
                gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
            case 0x83: /* V9 fqtox */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
            case 0x84: /* V9 fxtos */
                gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
            case 0x88: /* V9 fxtod */
                gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
            case 0x8c: /* V9 fxtoq */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
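            /* Naming convention used by the generators above: the letters after
               gen_fop_/gen_ne_fop_ give the operand sizes, destination first --
               F = 32-bit single, D = 64-bit double, Q = 128-bit quad.  So
               gen_fop_DFF produces a double from two singles (fsmuld) and
               gen_fop_QDD a quad from two doubles (fdmulq).  The "ne" variants
               are the ones that skip the IEEE exception check done by the
               plain forms. */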
        } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
            if (gen_trap_ifnofpu(dc)) {
            gen_op_clear_ieee_excp_and_FTT();
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            xop = GET_FIELD(insn, 18, 26);
#ifdef TARGET_SPARC64
                cond = GET_FIELD_SP(insn, 10, 12);          \
                cpu_src1 = get_src1(dc, insn);              \
                gen_compare_reg(&cmp, cond, cpu_src1);      \
                gen_fmov##sz(dc, &cmp, rd, rs2);            \
                free_compare(&cmp);                         \
            if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
            } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
            } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                CHECK_FPU_FEATURE(dc, FLOAT128);
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                     \
                cond = GET_FIELD_SP(insn, 14, 17);          \
                gen_fcompare(&cmp, fcc, cond);              \
                gen_fmov##sz(dc, &cmp, rd, rs2);            \
                free_compare(&cmp);                         \
            case 0x001: /* V9 fmovscc %fcc0 */
            case 0x002: /* V9 fmovdcc %fcc0 */
            case 0x003: /* V9 fmovqcc %fcc0 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x041: /* V9 fmovscc %fcc1 */
            case 0x042: /* V9 fmovdcc %fcc1 */
            case 0x043: /* V9 fmovqcc %fcc1 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x081: /* V9 fmovscc %fcc2 */
            case 0x082: /* V9 fmovdcc %fcc2 */
            case 0x083: /* V9 fmovqcc %fcc2 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x0c1: /* V9 fmovscc %fcc3 */
            case 0x0c2: /* V9 fmovdcc %fcc3 */
            case 0x0c3: /* V9 fmovqcc %fcc3 */
                CHECK_FPU_FEATURE(dc, FLOAT128);
#define FMOVCC(xcc, sz)                                     \
                cond = GET_FIELD_SP(insn, 14, 17);          \
                gen_compare(&cmp, xcc, cond, dc);           \
                gen_fmov##sz(dc, &cmp, rd, rs2);            \
                free_compare(&cmp);                         \
            case 0x101: /* V9 fmovscc %icc */
            case 0x102: /* V9 fmovdcc %icc */
            case 0x103: /* V9 fmovqcc %icc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x181: /* V9 fmovscc %xcc */
            case 0x182: /* V9 fmovdcc %xcc */
            case 0x183: /* V9 fmovqcc %xcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
            case 0x51: /* fcmps, V9 %fcc */
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
            case 0x52: /* fcmpd, V9 %fcc */
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
            case 0x53: /* fcmpq, V9 %fcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpq(rd & 3);
            case 0x55: /* fcmpes, V9 %fcc */
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
            case 0x56: /* fcmped, V9 %fcc */
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
            case 0x57: /* fcmpeq, V9 %fcc */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpeq(rd & 3);
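            /* The "rd & 3" in the fcmp* calls above selects which of the four
               V9 floating-point condition-code fields (%fcc0..%fcc3) receives
               the result; on pre-V9 CPUs only field 0 exists, so the field is
               effectively 0. */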
        } else if (xop == 0x2) {
            TCGv dst = gen_dest_gpr(dc, rd);
            rs1 = GET_FIELD(insn, 13, 17);
                /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_movi_tl(dst, simm);
                    gen_store_gpr(dc, rd, dst);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                        tcg_gen_movi_tl(dst, 0);
                        gen_store_gpr(dc, rd, dst);
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        gen_store_gpr(dc, rd, cpu_src2);
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_ori_tl(dst, cpu_src1, simm);
                    gen_store_gpr(dc, rd, dst);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                        /* mov shortcut:  or x, %g0, y -> mov x, y */
                        gen_store_gpr(dc, rd, cpu_src1);
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, dst);
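            /* The shortcuts above cover the usual synthetic instructions:
               "clr rd" assembles to "or %g0, %g0, rd" and "mov rs, rd" to
               "or %g0, rs, rd" (illustrative examples), so constant writes
               and plain register copies avoid emitting a real OR. */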
#ifdef TARGET_SPARC64
        } else if (xop == 0x25) { /* sll, V9 sllx */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                cpu_tmp0 = get_temp_tl(dc);
                if (insn & (1 << 12)) {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
            gen_store_gpr(dc, rd, cpu_dst);
        } else if (xop == 0x26) { /* srl, V9 srlx */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                    tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                cpu_tmp0 = get_temp_tl(dc);
                if (insn & (1 << 12)) {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                    tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
            gen_store_gpr(dc, rd, cpu_dst);
        } else if (xop == 0x27) { /* sra, V9 srax */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                    tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                cpu_tmp0 = get_temp_tl(dc);
                if (insn & (1 << 12)) {
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                    tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
            gen_store_gpr(dc, rd, cpu_dst);
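        /* In the V9 forms above, bit 12 of the instruction distinguishes the
           64-bit shifts (sllx/srlx/srax, count masked to 6 bits with 0x3f)
           from the 32-bit ones (count masked to 5 bits with 0x1f); the 32-bit
           right shifts additionally zero- or sign-extend from bit 31 first,
           which is what the tcg_gen_andi/ext32s steps implement. */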
        } else if (xop < 0x36) {
            cpu_src1 = get_src1(dc, insn);
            cpu_src2 = get_src2(dc, insn);
            switch (xop & ~0x10) {
                    gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                    dc->cc_op = CC_OP_ADD;
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                    gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                    dc->cc_op = CC_OP_SUB;
                    tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
            case 0x5: /* andn */
                tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
                tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
            case 0x7: /* xorn */
                tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
            case 0x8: /* addx, V9 addc */
                gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
#ifdef TARGET_SPARC64
            case 0x9: /* V9 mulx */
                tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
            case 0xa: /* umul */
                CHECK_IU_FEATURE(dc, MUL);
                gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
            case 0xb: /* smul */
                CHECK_IU_FEATURE(dc, MUL);
                gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                    tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                    dc->cc_op = CC_OP_LOGIC;
            case 0xc: /* subx, V9 subc */
                gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
#ifdef TARGET_SPARC64
            case 0xd: /* V9 udivx */
                gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
            case 0xe: /* udiv */
                CHECK_IU_FEATURE(dc, DIV);
                    gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                    dc->cc_op = CC_OP_DIV;
                    gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
            case 0xf: /* sdiv */
                CHECK_IU_FEATURE(dc, DIV);
                    gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                    dc->cc_op = CC_OP_DIV;
                    gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
            gen_store_gpr(dc, rd, cpu_dst);
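            /* Condition codes are evaluated lazily here: the "cc" variants only
               record the operands or result and set cpu_cc_op / dc->cc_op to
               the matching CC_OP_* value; the actual flags are materialised
               later, when something reads them. */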
            cpu_src1 = get_src1(dc, insn);
            cpu_src2 = get_src2(dc, insn);
            case 0x20: /* taddcc */
                gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                dc->cc_op = CC_OP_TADD;
            case 0x21: /* tsubcc */
                gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                dc->cc_op = CC_OP_TSUB;
            case 0x22: /* taddcctv */
                gen_helper_taddcctv(cpu_dst, cpu_env,
                                    cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                dc->cc_op = CC_OP_TADDTV;
            case 0x23: /* tsubcctv */
                gen_helper_tsubcctv(cpu_dst, cpu_env,
                                    cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                dc->cc_op = CC_OP_TSUBTV;
            case 0x24: /* mulscc */
                gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                dc->cc_op = CC_OP_ADD;
#ifndef TARGET_SPARC64
            case 0x25: /* sll */
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                } else { /* register */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x26: /* srl */
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                } else { /* register */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x27: /* sra */
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                } else { /* register */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                    tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                gen_store_gpr(dc, rd, cpu_dst);
                cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
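            /* WRY/WRASR architecturally write the XOR of rs1 and the second
               operand, which is why the cases here start with tcg_gen_xor_tl;
               the final tcg_gen_andi_tl keeps only the low 32 bits, since %y
               holds a 32-bit value even on 64-bit CPUs. */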
#ifndef TARGET_SPARC64
            case 0x01 ... 0x0f: /* undefined in the
                                   SPARCv8 manual, nop
                                   on the microSPARC II */
            case 0x10 ... 0x1f: /* implementation-dependent
                                   in the SPARCv8 manual, nop
                                   on the microSPARC II */
                if ((rd == 0x13) && (dc->def->features &
                                     CPU_FEATURE_POWERDOWN)) {
                    /* LEON3 power-down */
                    gen_helper_power_down(cpu_env);
            case 0x2: /* V9 wrccr */
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_wrccr(cpu_env, cpu_tmp0);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                dc->cc_op = CC_OP_FLAGS;
            case 0x3: /* V9 wrasi */
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
            case 0x6: /* V9 wrfprs */
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                if (supervisor(dc)) {
            case 0x13: /* Graphics Status */
                if (gen_trap_ifnofpu(dc)) {
                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
            case 0x14: /* Softint set */
                if (!supervisor(dc))
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_set_softint(cpu_env, cpu_tmp0);
            case 0x15: /* Softint clear */
                if (!supervisor(dc))
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_clear_softint(cpu_env, cpu_tmp0);
            case 0x16: /* Softint write */
                if (!supervisor(dc))
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_write_softint(cpu_env, cpu_tmp0);
            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                if (!supervisor(dc))
                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                    r_tickptr = tcg_temp_new_ptr();
                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                   offsetof(CPUSPARCState, tick));
                    gen_helper_tick_set_limit(r_tickptr,
                    tcg_temp_free_ptr(r_tickptr);
            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                if (!supervisor(dc))
                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                    r_tickptr = tcg_temp_new_ptr();
                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                   offsetof(CPUSPARCState, stick));
                    gen_helper_tick_set_count(r_tickptr,
                    tcg_temp_free_ptr(r_tickptr);
            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                if (!supervisor(dc))
                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                    r_tickptr = tcg_temp_new_ptr();
                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                   offsetof(CPUSPARCState, stick));
                    gen_helper_tick_set_limit(r_tickptr,
                    tcg_temp_free_ptr(r_tickptr);
            case 0x10: /* Performance Control */
            case 0x11: /* Performance Instrumentation Counter */
            case 0x12: /* Dispatch Control */
#if !defined(CONFIG_USER_ONLY)
        case 0x31: /* wrpsr, V9 saved, restored */
                if (!supervisor(dc))
#ifdef TARGET_SPARC64
                    gen_helper_saved(cpu_env);
                    gen_helper_restored(cpu_env);
                case 2: /* UA2005 allclean */
                case 3: /* UA2005 otherw */
                case 4: /* UA2005 normalw */
                case 5: /* UA2005 invalw */
                cpu_tmp0 = get_temp_tl(dc);
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                gen_helper_wrpsr(cpu_env, cpu_tmp0);
                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                dc->cc_op = CC_OP_FLAGS;
        case 0x32: /* wrwim, V9 wrpr */
                if (!supervisor(dc))
                cpu_tmp0 = get_temp_tl(dc);
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                  offsetof(trap_state, tpc));
                    tcg_temp_free_ptr(r_tsptr);
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                  offsetof(trap_state, tnpc));
                    tcg_temp_free_ptr(r_tsptr);
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                  offsetof(trap_state,
                    tcg_temp_free_ptr(r_tsptr);
                    r_tsptr = tcg_temp_new_ptr();
                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                    offsetof(trap_state, tt));
                    tcg_temp_free_ptr(r_tsptr);
                    r_tickptr = tcg_temp_new_ptr();
                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                   offsetof(CPUSPARCState, tick));
                    gen_helper_tick_set_count(r_tickptr,
                    tcg_temp_free_ptr(r_tickptr);
                    tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                    gen_helper_wrpstate(cpu_env, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState, tl));
                    dc->npc = DYNAMIC_PC;
                    gen_helper_wrpil(cpu_env, cpu_tmp0);
                    gen_helper_wrcwp(cpu_env, cpu_tmp0);
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState,
                case 11: // canrestore
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState,
                case 12: // cleanwin
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState,
                case 13: // otherwin
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState,
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState,
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                    offsetof(CPUSPARCState, gl));
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                    tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                if (dc->def->nwindows != 32) {
                    tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                    (1 << dc->def->nwindows) - 1);
        case 0x33: /* wrtbr, UA2005 wrhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                cpu_tmp0 = get_temp_tl(dc);
                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    // XXX gen_op_wrhpstate();
                    // XXX gen_op_wrhtstate();
                    tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                    tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                    r_tickptr = tcg_temp_new_ptr();
                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                   offsetof(CPUSPARCState, hstick));
                    gen_helper_tick_set_limit(r_tickptr,
                    tcg_temp_free_ptr(r_tickptr);
                case 6: // hver readonly
#ifdef TARGET_SPARC64
            case 0x2c: /* V9 movcc */
                int cc = GET_FIELD_SP(insn, 11, 12);
                int cond = GET_FIELD_SP(insn, 14, 17);
                if (insn & (1 << 18)) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    gen_fcompare(&cmp, cc, cond);
                /* The get_src2 above loaded the normal 13-bit
                   immediate field, not the 11-bit field we have
                   in movcc.  But it did handle the reg case.  */
                    simm = GET_FIELD_SPs(insn, 0, 10);
                    tcg_gen_movi_tl(cpu_src2, simm);
                dst = gen_load_gpr(dc, rd);
                tcg_gen_movcond_tl(cmp.cond, dst,
                gen_store_gpr(dc, rd, dst);
            case 0x2d: /* V9 sdivx */
                gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x2e: /* V9 popc */
                gen_helper_popc(cpu_dst, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x2f: /* V9 movr */
                int cond = GET_FIELD_SP(insn, 10, 12);
                gen_compare_reg(&cmp, cond, cpu_src1);
                /* The get_src2 above loaded the normal 13-bit
                   immediate field, not the 10-bit field we have
                   in movr.  But it did handle the reg case.  */
                    simm = GET_FIELD_SPs(insn, 0, 9);
                    tcg_gen_movi_tl(cpu_src2, simm);
                dst = gen_load_gpr(dc, rd);
                tcg_gen_movcond_tl(cmp.cond, dst,
                gen_store_gpr(dc, rd, dst);
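            /* Both movcc and movr are implemented with tcg_gen_movcond_tl: the
               destination is preloaded with its old value and the movcond then
               picks between that old value and the source depending on
               cmp.cond, so no branch needs to be emitted. */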
        } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
            int opf = GET_FIELD_SP(insn, 5, 13);
            rs1 = GET_FIELD(insn, 13, 17);
            rs2 = GET_FIELD(insn, 27, 31);
            if (gen_trap_ifnofpu(dc)) {
            case 0x000: /* VIS I edge8cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x001: /* VIS II edge8n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x002: /* VIS I edge8lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x003: /* VIS II edge8ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x004: /* VIS I edge16cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x005: /* VIS II edge16n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x006: /* VIS I edge16lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x007: /* VIS II edge16ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x008: /* VIS I edge32cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x009: /* VIS II edge32n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x00a: /* VIS I edge32lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x00b: /* VIS II edge32ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x010: /* VIS I array8 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x012: /* VIS I array16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x014: /* VIS I array32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x018: /* VIS I alignaddr */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x01a: /* VIS I alignaddrl */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x019: /* VIS II bmask */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x020: /* VIS I fcmple16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x022: /* VIS I fcmpne16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x024: /* VIS I fcmple32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x026: /* VIS I fcmpne32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x028: /* VIS I fcmpgt16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x02a: /* VIS I fcmpeq16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x02c: /* VIS I fcmpgt32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x02e: /* VIS I fcmpeq32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
            case 0x031: /* VIS I fmul8x16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
            case 0x033: /* VIS I fmul8x16au */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
            case 0x035: /* VIS I fmul8x16al */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
            case 0x036: /* VIS I fmul8sux16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
            case 0x037: /* VIS I fmul8ulx16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
            case 0x038: /* VIS I fmuld8sux16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
            case 0x039: /* VIS I fmuld8ulx16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
            case 0x03a: /* VIS I fpack32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
            case 0x03b: /* VIS I fpack16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
            case 0x03d: /* VIS I fpackfix */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
            case 0x03e: /* VIS I pdist */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
            case 0x048: /* VIS I faligndata */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
            case 0x04b: /* VIS I fpmerge */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
            case 0x04c: /* VIS II bshuffle */
                CHECK_FPU_FEATURE(dc, VIS2);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
            case 0x04d: /* VIS I fexpand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
            case 0x050: /* VIS I fpadd16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
            case 0x051: /* VIS I fpadd16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
            case 0x052: /* VIS I fpadd32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
            case 0x053: /* VIS I fpadd32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
            case 0x054: /* VIS I fpsub16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
            case 0x055: /* VIS I fpsub16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
            case 0x056: /* VIS I fpsub32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
            case 0x057: /* VIS I fpsub32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
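            /* The gen_gsr_fop_DDD cases above (fpack32, faligndata, bshuffle)
               differ from the gen_ne_fop_DDD ones in that the helper also
               receives the current %gsr value, since those VIS instructions
               depend on the GSR alignment/scale fields. */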
        } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
#ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
            gen_helper_restore(cpu_env);
            r_const = tcg_const_i32(3);
            gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
            tcg_temp_free_i32(r_const);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC;
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
            case 0x38:  /* jmpl */
                t = gen_dest_gpr(dc, rd);
                tcg_gen_movi_tl(t, dc->pc);
                gen_store_gpr(dc, rd, t);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                gen_address_mask(dc, cpu_tmp0);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:  /* rett, V9 return */
                if (!supervisor(dc))
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                gen_helper_rett(cpu_env);
            case 0x3b: /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
            case 0x3c:  /* save */
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
            case 0x3d:  /* restore */
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:  /* V9 done/retry */
                    if (!supervisor(dc))
                    dc->npc = DYNAMIC_PC;
                    dc->pc = DYNAMIC_PC;
                    gen_helper_done(cpu_env);
                    if (!supervisor(dc))
                    dc->npc = DYNAMIC_PC;
                    dc->pc = DYNAMIC_PC;
                    gen_helper_retry(cpu_env);
    case 3: /* load/store instructions */
        unsigned int xop = GET_FIELD(insn, 7, 12);
        /* ??? gen_address_mask prevents us from using a source
           register directly.  Always generate a temporary.  */
        TCGv cpu_addr = get_temp_tl(dc);
        tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
        if (xop == 0x3c || xop == 0x3e) {
            /* V9 casa/casxa : no offset */
        } else if (IS_IMM) {     /* immediate */
            simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
        } else {            /* register */
            rs2 = GET_FIELD(insn, 27, 31);
                tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
        if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
            (xop > 0x17 && xop <= 0x1d ) ||
            (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
            TCGv cpu_val = gen_dest_gpr(dc, rd);
            case 0x0: /* ld, V9 lduw, load unsigned word */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
            case 0x1: /* ldub, load unsigned byte */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
            case 0x2: /* lduh, load unsigned halfword */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
            case 0x3: /* ldd, load double word */
                    r_const = tcg_const_i32(7);
                    /* XXX remove alignment check */
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_address_mask(dc, cpu_addr);
                    t64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_i64_tl(cpu_val, t64);
                    tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    gen_store_gpr(dc, rd + 1, cpu_val);
                    tcg_gen_shri_i64(t64, t64, 32);
                    tcg_gen_trunc_i64_tl(cpu_val, t64);
                    tcg_temp_free_i64(t64);
                    tcg_gen_ext32u_tl(cpu_val, cpu_val);
            case 0x9: /* ldsb, load signed byte */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
            case 0xa: /* ldsh, load signed halfword */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
            case 0xd: /* ldstub -- XXX: should be atomically */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    r_const = tcg_const_tl(0xff);
                    tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                    tcg_temp_free(r_const);
                /* swap, swap register with memory. Also atomically */
                    TCGv t0 = get_temp_tl(dc);
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                    tcg_gen_mov_tl(cpu_val, t0);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x10: /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
            case 0x11: /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
            case 0x12: /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
            case 0x13: /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
            case 0x19: /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
            case 0x1a: /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
            case 0x1d: /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_ldstub_asi(cpu_val, cpu_addr, insn);
            case 0x1f: /* swapa, swap reg with alt. memory. Also
                          atomically */
                CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                cpu_src1 = gen_load_gpr(dc, rd);
                gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
#ifndef TARGET_SPARC64
            case 0x30: /* ldc */
            case 0x31: /* ldcsr */
            case 0x33: /* lddc */
#ifdef TARGET_SPARC64
            case 0x08: /* V9 ldsw */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
            case 0x0b: /* V9 ldx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
            case 0x18: /* V9 ldswa */
                gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
            case 0x1b: /* V9 ldxa */
                gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
            case 0x2d: /* V9 prefetch, no effect */
            case 0x30: /* V9 ldfa */
                if (gen_trap_ifnofpu(dc)) {
                gen_ldf_asi(cpu_addr, insn, 4, rd);
                gen_update_fprs_dirty(rd);
            case 0x33: /* V9 lddfa */
                if (gen_trap_ifnofpu(dc)) {
                gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                gen_update_fprs_dirty(DFPREG(rd));
            case 0x3d: /* V9 prefetcha, no effect */
            case 0x32: /* V9 ldqfa */
                CHECK_FPU_FEATURE(dc, FLOAT128);
                if (gen_trap_ifnofpu(dc)) {
                gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                gen_update_fprs_dirty(QFPREG(rd));
            gen_store_gpr(dc, rd, cpu_val);
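            /* ldd above performs one 64-bit access and then splits it: the low
               word of t64 goes to the odd register (rd + 1) and the high word
               is left in cpu_val for the common store to rd, matching the
               big-endian register-pair layout of ldd/std; rd is normally an
               even register for these instructions. */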
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        } else if (xop >= 0x20 && xop < 0x24) {
            if (gen_trap_ifnofpu(dc)) {
            case 0x20: /* ldf, load fpreg */
                gen_address_mask(dc, cpu_addr);
                t0 = get_temp_tl(dc);
                tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
            case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                gen_address_mask(dc, cpu_addr);
                    TCGv_i64 t64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                    gen_helper_ldxfsr(cpu_env, t64);
                    tcg_temp_free_i64(t64);
                cpu_dst_32 = get_temp_i32(dc);
                t0 = get_temp_tl(dc);
                tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                gen_helper_ldfsr(cpu_env, cpu_dst_32);
            case 0x22: /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    r_const = tcg_const_i32(dc->mem_idx);
                    gen_address_mask(dc, cpu_addr);
                    gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_op_store_QT0_fpr(QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
            case 0x23: /* lddf, load double fpreg */
                gen_address_mask(dc, cpu_addr);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
        } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                   xop == 0xe || xop == 0x1e) {
            TCGv cpu_val = gen_load_gpr(dc, rd);
            case 0x4: /* st, store word */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
            case 0x5: /* stb, store byte */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
            case 0x6: /* sth, store halfword */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
            case 0x7: /* std, store double word */
                    gen_address_mask(dc, cpu_addr);
                    r_const = tcg_const_i32(7);
                    /* XXX remove alignment check */
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    lo = gen_load_gpr(dc, rd + 1);
                    t64 = tcg_temp_new_i64();
                    tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                    tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                    tcg_temp_free_i64(t64);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_st_asi(cpu_val, cpu_addr, insn, 4);
                dc->npc = DYNAMIC_PC;
            case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_st_asi(cpu_val, cpu_addr, insn, 1);
                dc->npc = DYNAMIC_PC;
            case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_st_asi(cpu_val, cpu_addr, insn, 2);
                dc->npc = DYNAMIC_PC;
            case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
#ifdef TARGET_SPARC64
            case 0x0e: /* V9 stx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
            case 0x1e: /* V9 stxa */
                gen_st_asi(cpu_val, cpu_addr, insn, 8);
                dc->npc = DYNAMIC_PC;
4986 } else if (xop
> 0x23 && xop
< 0x28) {
4987 if (gen_trap_ifnofpu(dc
)) {
4992 case 0x24: /* stf, store fpreg */
4994 TCGv t
= get_temp_tl(dc
);
4995 gen_address_mask(dc
, cpu_addr
);
4996 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
4997 tcg_gen_ext_i32_tl(t
, cpu_src1_32
);
4998 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
        case 0x25: /* stfsr, V9 stxfsr */
            {
                TCGv t = get_temp_tl(dc);

                tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                gen_address_mask(dc, cpu_addr);
                if (rd == 1) {
                    tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                    break;
                }
#endif
                tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
            }
            break;
        case 0x26:
#ifdef TARGET_SPARC64
            /* V9 stqf, store quad fpreg */
            CHECK_FPU_FEATURE(dc, FLOAT128);
            gen_op_load_fpr_QT0(QFPREG(rd));
            r_const = tcg_const_i32(dc->mem_idx);
            gen_address_mask(dc, cpu_addr);
            gen_helper_stqf(cpu_env, cpu_addr, r_const);
            tcg_temp_free_i32(r_const);
            break;
#else /* !TARGET_SPARC64 */
            /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
            goto illegal_insn;
#else
            if (!supervisor(dc))
                goto priv_insn;
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            goto nfq_insn;
#endif
#endif
        case 0x27: /* stdf, store double fpreg */
            gen_address_mask(dc, cpu_addr);
            cpu_src1_64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
            break;
        }
    } else if (xop > 0x33 && xop < 0x3f) {
        switch (xop) {
#ifdef TARGET_SPARC64
        case 0x34: /* V9 stfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(cpu_addr, insn, 4, rd);
            break;
        case 0x36: /* V9 stqfa */
            CHECK_FPU_FEATURE(dc, FLOAT128);
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            r_const = tcg_const_i32(7);
            gen_helper_check_align(cpu_env, cpu_addr, r_const);
            tcg_temp_free_i32(r_const);
            gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
            break;
        case 0x37: /* V9 stdfa */
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
            break;
        case 0x3e: /* V9 casxa */
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
#else
        case 0x34: /* stc */
        case 0x35: /* stcsr */
        case 0x36: /* stdcq */
        case 0x37: /* stdc */
            goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
            CHECK_IU_FEATURE(dc, CASA);
            /* LEON3 allows CASA from user space with ASI 0xa */
            if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
                goto priv_insn;
            }
#endif
            rs2 = GET_FIELD(insn, 27, 31);
            cpu_src2 = gen_load_gpr(dc, rs2);
            gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
            break;
#endif
        }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    r_const = tcg_const_i32(TT_ILL_INSN);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
    goto egress;
 unimp_flush:
    r_const = tcg_const_i32(TT_UNIMP_FLUSH);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    r_const = tcg_const_i32(TT_PRIV_INSN);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
    goto egress;
#endif
 nfpu_insn:
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    r_const = tcg_const_i32(TT_NCP_INSN);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
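/* Translate one basic block of guest code: decode one instruction at a time
   with disas_sparc_insn() until one of the loop exit conditions below
   (non-sequential pc, page boundary, singlestep, op buffer or instruction
   budget exhausted) is hit. */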
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    unsigned int insn;
    int num_insns;
    int max_insns;
    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env, false);
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    num_insns = 0;
    gen_tb_start(tb);
    do {
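        /* Record the state needed to restart this insn.  npc is either a
           real address, DYNAMIC_PC, or a not-yet-resolved conditional
           branch: in that case jump_pc[0] is tagged with the JUMP_PC bit
           and jump_pc[1] is implicitly pc + 4 (see the assert below). */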
        if (dc->npc & JUMP_PC) {
            assert(dc->jump_pc[1] == dc->pc + 4);
            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
        } else {
            tcg_gen_insn_start(dc->pc, dc->npc);
        }
        num_insns++;
        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (dc->pc != DYNAMIC_PC &&
        (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
        /* static PC and NPC: we can use direct chaining */
        gen_goto_tb(dc, 0, dc->pc, dc->npc);
    } else {
        if (dc->pc != DYNAMIC_PC) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        save_npc(dc);
        tcg_gen_exit_tb(0);
    }
    gen_tb_end(tb, num_insns);

    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
    }
}
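/* Register the fixed TCG globals (env pointer, register window pointer,
   condition-code state, PSR/FSR, PC/NPC and the integer and FP register
   files) that the translation code above operates on. */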
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1", "g2", "g3", "g4", "g5", "g6", "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    /* init various static tables */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");
#ifdef TARGET_SPARC64
    cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
                                     "xcc");
    cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
                                     "asi");
    cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
                                      "fprs");
    cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                 "gsr");
    cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUSPARCState, tick_cmpr),
                                       "tick_cmpr");
    cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, stick_cmpr),
                                        "stick_cmpr");
    cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, hstick_cmpr),
                                         "hstick_cmpr");
    cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                   "hintp");
    cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                  "htba");
    cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                  "hver");
    cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUSPARCState, ssr), "ssr");
    cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUSPARCState, version), "ver");
    cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, softint),
                                         "softint");
#else
    cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                 "wim");
#endif
    cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                  "cond");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, cc_src2),
                                     "cc_src2");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
                                    "cc_dst");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSPARCState, cc_op),
                                       "cc_op");
    cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
                                     "psr");
    cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                 "fsr");
    cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                "pc");
    cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                 "npc");
    cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
    cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                 "tbr");
#endif
    for (i = 1; i < 8; i++) {
        cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUSPARCState, gregs[i]),
                                          gregnames[i]);
    }
    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
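/* Rebuild pc/npc from the values recorded by tcg_gen_insn_start() when an
   exception forces a restart in the middle of a translation block:
   data[0] is the pc, data[1] is either a real npc, DYNAMIC_PC, or a
   JUMP_PC-tagged branch target. */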
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;