4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
41 /* global register indexes */
42 static TCGv_ptr cpu_env
, cpu_regwptr
;
43 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
44 static TCGv_i32 cpu_cc_op
;
45 static TCGv_i32 cpu_psr
;
46 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
47 static TCGv cpu_regs
[32];
49 #ifndef CONFIG_USER_ONLY
54 static TCGv_i32 cpu_xcc
, cpu_asi
, cpu_fprs
;
56 static TCGv cpu_tick_cmpr
, cpu_stick_cmpr
, cpu_hstick_cmpr
;
57 static TCGv cpu_hintp
, cpu_htba
, cpu_hver
, cpu_ssr
, cpu_ver
;
58 static TCGv_i32 cpu_softint
;
62 /* Floating point registers */
63 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
65 #include "exec/gen-icount.h"
67 typedef struct DisasContext
{
68 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
69 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
70 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
74 int address_mask_32bit
;
76 uint32_t cc_op
; /* current CC operation */
77 struct TranslationBlock
*tb
;
92 // This function uses non-native bit order
93 #define GET_FIELD(X, FROM, TO) \
94 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
96 // This function uses the order in the manuals, i.e. bit 0 is 2^0
97 #define GET_FIELD_SP(X, FROM, TO) \
98 GET_FIELD(X, 31 - (TO), 31 - (FROM))
100 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
101 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
103 #ifdef TARGET_SPARC64
104 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
105 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
107 #define DFPREG(r) (r & 0x1e)
108 #define QFPREG(r) (r & 0x1c)
111 #define UA2005_HTRAP_MASK 0xff
112 #define V8_TRAP_MASK 0x7f
114 static int sign_extend(int x
, int len
)
117 return (x
<< len
) >> len
;
120 #define IS_IMM (insn & (1<<13))
122 static inline TCGv_i32
get_temp_i32(DisasContext
*dc
)
125 assert(dc
->n_t32
< ARRAY_SIZE(dc
->t32
));
126 dc
->t32
[dc
->n_t32
++] = t
= tcg_temp_new_i32();
130 static inline TCGv
get_temp_tl(DisasContext
*dc
)
133 assert(dc
->n_ttl
< ARRAY_SIZE(dc
->ttl
));
134 dc
->ttl
[dc
->n_ttl
++] = t
= tcg_temp_new();
/* Mark the FP register bank containing rd as dirty in FPRS
 * (bit 0 = lower bank, bit 1 = upper bank).  No-op on sparc32. */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
145 /* floating point registers moves */
146 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
148 #if TCG_TARGET_REG_BITS == 32
150 return TCGV_LOW(cpu_fpr
[src
/ 2]);
152 return TCGV_HIGH(cpu_fpr
[src
/ 2]);
156 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr
[src
/ 2]));
158 TCGv_i32 ret
= get_temp_i32(dc
);
159 TCGv_i64 t
= tcg_temp_new_i64();
161 tcg_gen_shri_i64(t
, cpu_fpr
[src
/ 2], 32);
162 tcg_gen_extrl_i64_i32(ret
, t
);
163 tcg_temp_free_i64(t
);
170 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
172 #if TCG_TARGET_REG_BITS == 32
174 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr
[dst
/ 2]), v
);
176 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr
[dst
/ 2]), v
);
179 TCGv_i64 t
= MAKE_TCGV_I64(GET_TCGV_I32(v
));
180 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
181 (dst
& 1 ? 0 : 32), 32);
183 gen_update_fprs_dirty(dst
);
186 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
188 return get_temp_i32(dc
);
191 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
194 return cpu_fpr
[src
/ 2];
197 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
200 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
201 gen_update_fprs_dirty(dst
);
204 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
206 return cpu_fpr
[DFPREG(dst
) / 2];
209 static void gen_op_load_fpr_QT0(unsigned int src
)
211 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
212 offsetof(CPU_QuadU
, ll
.upper
));
213 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
214 offsetof(CPU_QuadU
, ll
.lower
));
217 static void gen_op_load_fpr_QT1(unsigned int src
)
219 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
220 offsetof(CPU_QuadU
, ll
.upper
));
221 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
222 offsetof(CPU_QuadU
, ll
.lower
));
225 static void gen_op_store_QT0_fpr(unsigned int dst
)
227 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
228 offsetof(CPU_QuadU
, ll
.upper
));
229 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
230 offsetof(CPU_QuadU
, ll
.lower
));
233 #ifdef TARGET_SPARC64
234 static void gen_move_Q(unsigned int rd
, unsigned int rs
)
239 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
240 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
241 gen_update_fprs_dirty(rd
);
246 #ifdef CONFIG_USER_ONLY
247 #define supervisor(dc) 0
248 #ifdef TARGET_SPARC64
249 #define hypervisor(dc) 0
252 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
253 #ifdef TARGET_SPARC64
254 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
259 #ifdef TARGET_SPARC64
261 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
263 #define AM_CHECK(dc) (1)
267 static inline void gen_address_mask(DisasContext
*dc
, TCGv addr
)
269 #ifdef TARGET_SPARC64
271 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
275 static inline TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
279 return cpu_regs
[reg
];
281 TCGv t
= get_temp_tl(dc
);
282 tcg_gen_movi_tl(t
, 0);
287 static inline void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
291 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
295 static inline TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
299 return cpu_regs
[reg
];
301 return get_temp_tl(dc
);
305 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
,
306 target_ulong pc
, target_ulong npc
)
308 TranslationBlock
*tb
;
311 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
312 (npc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
314 /* jump to same page: we can use a direct jump */
315 tcg_gen_goto_tb(tb_num
);
316 tcg_gen_movi_tl(cpu_pc
, pc
);
317 tcg_gen_movi_tl(cpu_npc
, npc
);
318 tcg_gen_exit_tb((uintptr_t)tb
+ tb_num
);
320 /* jump to another page: currently not optimized */
321 tcg_gen_movi_tl(cpu_pc
, pc
);
322 tcg_gen_movi_tl(cpu_npc
, npc
);
328 static inline void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
330 tcg_gen_extu_i32_tl(reg
, src
);
331 tcg_gen_shri_tl(reg
, reg
, PSR_NEG_SHIFT
);
332 tcg_gen_andi_tl(reg
, reg
, 0x1);
335 static inline void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
337 tcg_gen_extu_i32_tl(reg
, src
);
338 tcg_gen_shri_tl(reg
, reg
, PSR_ZERO_SHIFT
);
339 tcg_gen_andi_tl(reg
, reg
, 0x1);
342 static inline void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
344 tcg_gen_extu_i32_tl(reg
, src
);
345 tcg_gen_shri_tl(reg
, reg
, PSR_OVF_SHIFT
);
346 tcg_gen_andi_tl(reg
, reg
, 0x1);
349 static inline void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
351 tcg_gen_extu_i32_tl(reg
, src
);
352 tcg_gen_shri_tl(reg
, reg
, PSR_CARRY_SHIFT
);
353 tcg_gen_andi_tl(reg
, reg
, 0x1);
356 static inline void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
358 tcg_gen_mov_tl(cpu_cc_src
, src1
);
359 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
360 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
361 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
364 static TCGv_i32
gen_add32_carry32(void)
366 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
368 /* Carry is computed from a previous add: (dst < src) */
369 #if TARGET_LONG_BITS == 64
370 cc_src1_32
= tcg_temp_new_i32();
371 cc_src2_32
= tcg_temp_new_i32();
372 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
373 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
375 cc_src1_32
= cpu_cc_dst
;
376 cc_src2_32
= cpu_cc_src
;
379 carry_32
= tcg_temp_new_i32();
380 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
382 #if TARGET_LONG_BITS == 64
383 tcg_temp_free_i32(cc_src1_32
);
384 tcg_temp_free_i32(cc_src2_32
);
390 static TCGv_i32
gen_sub32_carry32(void)
392 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
394 /* Carry is computed from a previous borrow: (src1 < src2) */
395 #if TARGET_LONG_BITS == 64
396 cc_src1_32
= tcg_temp_new_i32();
397 cc_src2_32
= tcg_temp_new_i32();
398 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
399 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
401 cc_src1_32
= cpu_cc_src
;
402 cc_src2_32
= cpu_cc_src2
;
405 carry_32
= tcg_temp_new_i32();
406 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
408 #if TARGET_LONG_BITS == 64
409 tcg_temp_free_i32(cc_src1_32
);
410 tcg_temp_free_i32(cc_src2_32
);
416 static void gen_op_addx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
417 TCGv src2
, int update_cc
)
425 /* Carry is known to be zero. Fall back to plain ADD. */
427 gen_op_add_cc(dst
, src1
, src2
);
429 tcg_gen_add_tl(dst
, src1
, src2
);
436 if (TARGET_LONG_BITS
== 32) {
437 /* We can re-use the host's hardware carry generation by using
438 an ADD2 opcode. We discard the low part of the output.
439 Ideally we'd combine this operation with the add that
440 generated the carry in the first place. */
441 carry
= tcg_temp_new();
442 tcg_gen_add2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
443 tcg_temp_free(carry
);
446 carry_32
= gen_add32_carry32();
452 carry_32
= gen_sub32_carry32();
456 /* We need external help to produce the carry. */
457 carry_32
= tcg_temp_new_i32();
458 gen_helper_compute_C_icc(carry_32
, cpu_env
);
462 #if TARGET_LONG_BITS == 64
463 carry
= tcg_temp_new();
464 tcg_gen_extu_i32_i64(carry
, carry_32
);
469 tcg_gen_add_tl(dst
, src1
, src2
);
470 tcg_gen_add_tl(dst
, dst
, carry
);
472 tcg_temp_free_i32(carry_32
);
473 #if TARGET_LONG_BITS == 64
474 tcg_temp_free(carry
);
479 tcg_gen_mov_tl(cpu_cc_src
, src1
);
480 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
481 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
482 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADDX
);
483 dc
->cc_op
= CC_OP_ADDX
;
487 static inline void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
489 tcg_gen_mov_tl(cpu_cc_src
, src1
);
490 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
491 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
492 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
495 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
496 TCGv src2
, int update_cc
)
504 /* Carry is known to be zero. Fall back to plain SUB. */
506 gen_op_sub_cc(dst
, src1
, src2
);
508 tcg_gen_sub_tl(dst
, src1
, src2
);
515 carry_32
= gen_add32_carry32();
521 if (TARGET_LONG_BITS
== 32) {
522 /* We can re-use the host's hardware carry generation by using
523 a SUB2 opcode. We discard the low part of the output.
524 Ideally we'd combine this operation with the add that
525 generated the carry in the first place. */
526 carry
= tcg_temp_new();
527 tcg_gen_sub2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
528 tcg_temp_free(carry
);
531 carry_32
= gen_sub32_carry32();
535 /* We need external help to produce the carry. */
536 carry_32
= tcg_temp_new_i32();
537 gen_helper_compute_C_icc(carry_32
, cpu_env
);
541 #if TARGET_LONG_BITS == 64
542 carry
= tcg_temp_new();
543 tcg_gen_extu_i32_i64(carry
, carry_32
);
548 tcg_gen_sub_tl(dst
, src1
, src2
);
549 tcg_gen_sub_tl(dst
, dst
, carry
);
551 tcg_temp_free_i32(carry_32
);
552 #if TARGET_LONG_BITS == 64
553 tcg_temp_free(carry
);
558 tcg_gen_mov_tl(cpu_cc_src
, src1
);
559 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
560 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
561 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
562 dc
->cc_op
= CC_OP_SUBX
;
566 static inline void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
568 TCGv r_temp
, zero
, t0
;
570 r_temp
= tcg_temp_new();
577 zero
= tcg_const_tl(0);
578 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
579 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
580 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
581 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
586 // env->y = (b2 << 31) | (env->y >> 1);
587 tcg_gen_andi_tl(r_temp
, cpu_cc_src
, 0x1);
588 tcg_gen_shli_tl(r_temp
, r_temp
, 31);
589 tcg_gen_shri_tl(t0
, cpu_y
, 1);
590 tcg_gen_andi_tl(t0
, t0
, 0x7fffffff);
591 tcg_gen_or_tl(t0
, t0
, r_temp
);
592 tcg_gen_andi_tl(cpu_y
, t0
, 0xffffffff);
595 gen_mov_reg_N(t0
, cpu_psr
);
596 gen_mov_reg_V(r_temp
, cpu_psr
);
597 tcg_gen_xor_tl(t0
, t0
, r_temp
);
598 tcg_temp_free(r_temp
);
600 // T0 = (b1 << 31) | (T0 >> 1);
602 tcg_gen_shli_tl(t0
, t0
, 31);
603 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
604 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
607 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
609 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
612 static inline void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
614 #if TARGET_LONG_BITS == 32
616 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
618 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
621 TCGv t0
= tcg_temp_new_i64();
622 TCGv t1
= tcg_temp_new_i64();
625 tcg_gen_ext32s_i64(t0
, src1
);
626 tcg_gen_ext32s_i64(t1
, src2
);
628 tcg_gen_ext32u_i64(t0
, src1
);
629 tcg_gen_ext32u_i64(t1
, src2
);
632 tcg_gen_mul_i64(dst
, t0
, t1
);
636 tcg_gen_shri_i64(cpu_y
, dst
, 32);
640 static inline void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
642 /* zero-extend truncated operands before multiplication */
643 gen_op_multiply(dst
, src1
, src2
, 0);
646 static inline void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
648 /* sign-extend truncated operands before multiplication */
649 gen_op_multiply(dst
, src1
, src2
, 1);
653 static inline void gen_op_eval_ba(TCGv dst
)
655 tcg_gen_movi_tl(dst
, 1);
659 static inline void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
661 gen_mov_reg_Z(dst
, src
);
665 static inline void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
667 TCGv t0
= tcg_temp_new();
668 gen_mov_reg_N(t0
, src
);
669 gen_mov_reg_V(dst
, src
);
670 tcg_gen_xor_tl(dst
, dst
, t0
);
671 gen_mov_reg_Z(t0
, src
);
672 tcg_gen_or_tl(dst
, dst
, t0
);
677 static inline void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
679 TCGv t0
= tcg_temp_new();
680 gen_mov_reg_V(t0
, src
);
681 gen_mov_reg_N(dst
, src
);
682 tcg_gen_xor_tl(dst
, dst
, t0
);
687 static inline void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
689 TCGv t0
= tcg_temp_new();
690 gen_mov_reg_Z(t0
, src
);
691 gen_mov_reg_C(dst
, src
);
692 tcg_gen_or_tl(dst
, dst
, t0
);
697 static inline void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
699 gen_mov_reg_C(dst
, src
);
703 static inline void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
705 gen_mov_reg_V(dst
, src
);
709 static inline void gen_op_eval_bn(TCGv dst
)
711 tcg_gen_movi_tl(dst
, 0);
715 static inline void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
717 gen_mov_reg_N(dst
, src
);
721 static inline void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
723 gen_mov_reg_Z(dst
, src
);
724 tcg_gen_xori_tl(dst
, dst
, 0x1);
728 static inline void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
730 gen_op_eval_ble(dst
, src
);
731 tcg_gen_xori_tl(dst
, dst
, 0x1);
735 static inline void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
737 gen_op_eval_bl(dst
, src
);
738 tcg_gen_xori_tl(dst
, dst
, 0x1);
742 static inline void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
744 gen_op_eval_bleu(dst
, src
);
745 tcg_gen_xori_tl(dst
, dst
, 0x1);
749 static inline void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
751 gen_mov_reg_C(dst
, src
);
752 tcg_gen_xori_tl(dst
, dst
, 0x1);
756 static inline void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
758 gen_mov_reg_N(dst
, src
);
759 tcg_gen_xori_tl(dst
, dst
, 0x1);
763 static inline void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
765 gen_mov_reg_V(dst
, src
);
766 tcg_gen_xori_tl(dst
, dst
, 0x1);
770 FPSR bit field FCC1 | FCC0:
776 static inline void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
777 unsigned int fcc_offset
)
779 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
780 tcg_gen_andi_tl(reg
, reg
, 0x1);
783 static inline void gen_mov_reg_FCC1(TCGv reg
, TCGv src
,
784 unsigned int fcc_offset
)
786 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
787 tcg_gen_andi_tl(reg
, reg
, 0x1);
791 static inline void gen_op_eval_fbne(TCGv dst
, TCGv src
,
792 unsigned int fcc_offset
)
794 TCGv t0
= tcg_temp_new();
795 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
796 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
797 tcg_gen_or_tl(dst
, dst
, t0
);
801 // 1 or 2: FCC0 ^ FCC1
802 static inline void gen_op_eval_fblg(TCGv dst
, TCGv src
,
803 unsigned int fcc_offset
)
805 TCGv t0
= tcg_temp_new();
806 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
807 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
808 tcg_gen_xor_tl(dst
, dst
, t0
);
813 static inline void gen_op_eval_fbul(TCGv dst
, TCGv src
,
814 unsigned int fcc_offset
)
816 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
820 static inline void gen_op_eval_fbl(TCGv dst
, TCGv src
,
821 unsigned int fcc_offset
)
823 TCGv t0
= tcg_temp_new();
824 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
825 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
826 tcg_gen_andc_tl(dst
, dst
, t0
);
831 static inline void gen_op_eval_fbug(TCGv dst
, TCGv src
,
832 unsigned int fcc_offset
)
834 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
838 static inline void gen_op_eval_fbg(TCGv dst
, TCGv src
,
839 unsigned int fcc_offset
)
841 TCGv t0
= tcg_temp_new();
842 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
843 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
844 tcg_gen_andc_tl(dst
, t0
, dst
);
849 static inline void gen_op_eval_fbu(TCGv dst
, TCGv src
,
850 unsigned int fcc_offset
)
852 TCGv t0
= tcg_temp_new();
853 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
854 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
855 tcg_gen_and_tl(dst
, dst
, t0
);
860 static inline void gen_op_eval_fbe(TCGv dst
, TCGv src
,
861 unsigned int fcc_offset
)
863 TCGv t0
= tcg_temp_new();
864 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
865 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
866 tcg_gen_or_tl(dst
, dst
, t0
);
867 tcg_gen_xori_tl(dst
, dst
, 0x1);
871 // 0 or 3: !(FCC0 ^ FCC1)
872 static inline void gen_op_eval_fbue(TCGv dst
, TCGv src
,
873 unsigned int fcc_offset
)
875 TCGv t0
= tcg_temp_new();
876 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
877 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
878 tcg_gen_xor_tl(dst
, dst
, t0
);
879 tcg_gen_xori_tl(dst
, dst
, 0x1);
884 static inline void gen_op_eval_fbge(TCGv dst
, TCGv src
,
885 unsigned int fcc_offset
)
887 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
888 tcg_gen_xori_tl(dst
, dst
, 0x1);
891 // !1: !(FCC0 & !FCC1)
892 static inline void gen_op_eval_fbuge(TCGv dst
, TCGv src
,
893 unsigned int fcc_offset
)
895 TCGv t0
= tcg_temp_new();
896 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
897 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
898 tcg_gen_andc_tl(dst
, dst
, t0
);
899 tcg_gen_xori_tl(dst
, dst
, 0x1);
904 static inline void gen_op_eval_fble(TCGv dst
, TCGv src
,
905 unsigned int fcc_offset
)
907 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
908 tcg_gen_xori_tl(dst
, dst
, 0x1);
911 // !2: !(!FCC0 & FCC1)
912 static inline void gen_op_eval_fbule(TCGv dst
, TCGv src
,
913 unsigned int fcc_offset
)
915 TCGv t0
= tcg_temp_new();
916 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
917 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
918 tcg_gen_andc_tl(dst
, t0
, dst
);
919 tcg_gen_xori_tl(dst
, dst
, 0x1);
923 // !3: !(FCC0 & FCC1)
924 static inline void gen_op_eval_fbo(TCGv dst
, TCGv src
,
925 unsigned int fcc_offset
)
927 TCGv t0
= tcg_temp_new();
928 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
929 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
930 tcg_gen_and_tl(dst
, dst
, t0
);
931 tcg_gen_xori_tl(dst
, dst
, 0x1);
935 static inline void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
936 target_ulong pc2
, TCGv r_cond
)
938 TCGLabel
*l1
= gen_new_label();
940 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
942 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
945 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
948 static void gen_branch_a(DisasContext
*dc
, target_ulong pc1
)
950 TCGLabel
*l1
= gen_new_label();
951 target_ulong npc
= dc
->npc
;
953 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cond
, 0, l1
);
955 gen_goto_tb(dc
, 0, npc
, pc1
);
958 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
963 static void gen_branch_n(DisasContext
*dc
, target_ulong pc1
)
965 target_ulong npc
= dc
->npc
;
967 if (likely(npc
!= DYNAMIC_PC
)) {
969 dc
->jump_pc
[0] = pc1
;
970 dc
->jump_pc
[1] = npc
+ 4;
975 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
977 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
978 t
= tcg_const_tl(pc1
);
980 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, z
, t
, cpu_npc
);
988 static inline void gen_generic_branch(DisasContext
*dc
)
990 TCGv npc0
= tcg_const_tl(dc
->jump_pc
[0]);
991 TCGv npc1
= tcg_const_tl(dc
->jump_pc
[1]);
992 TCGv zero
= tcg_const_tl(0);
994 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1001 /* call this function before using the condition register as it may
1002 have been set for a jump */
1003 static inline void flush_cond(DisasContext
*dc
)
1005 if (dc
->npc
== JUMP_PC
) {
1006 gen_generic_branch(dc
);
1007 dc
->npc
= DYNAMIC_PC
;
1011 static inline void save_npc(DisasContext
*dc
)
1013 if (dc
->npc
== JUMP_PC
) {
1014 gen_generic_branch(dc
);
1015 dc
->npc
= DYNAMIC_PC
;
1016 } else if (dc
->npc
!= DYNAMIC_PC
) {
1017 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1021 static inline void update_psr(DisasContext
*dc
)
1023 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1024 dc
->cc_op
= CC_OP_FLAGS
;
1025 gen_helper_compute_psr(cpu_env
);
1029 static inline void save_state(DisasContext
*dc
)
1031 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1035 static inline void gen_mov_pc_npc(DisasContext
*dc
)
1037 if (dc
->npc
== JUMP_PC
) {
1038 gen_generic_branch(dc
);
1039 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1040 dc
->pc
= DYNAMIC_PC
;
1041 } else if (dc
->npc
== DYNAMIC_PC
) {
1042 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1043 dc
->pc
= DYNAMIC_PC
;
1049 static inline void gen_op_next_insn(void)
1051 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1052 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1055 static void free_compare(DisasCompare
*cmp
)
1058 tcg_temp_free(cmp
->c1
);
1061 tcg_temp_free(cmp
->c2
);
1065 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1068 static int subcc_cond
[16] = {
1084 -1, /* no overflow */
1087 static int logic_cond
[16] = {
1089 TCG_COND_EQ
, /* eq: Z */
1090 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1091 TCG_COND_LT
, /* lt: N ^ V -> N */
1092 TCG_COND_EQ
, /* leu: C | Z -> Z */
1093 TCG_COND_NEVER
, /* ltu: C -> 0 */
1094 TCG_COND_LT
, /* neg: N */
1095 TCG_COND_NEVER
, /* vs: V -> 0 */
1097 TCG_COND_NE
, /* ne: !Z */
1098 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1099 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1100 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1101 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1102 TCG_COND_GE
, /* pos: !N */
1103 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1109 #ifdef TARGET_SPARC64
1119 switch (dc
->cc_op
) {
1121 cmp
->cond
= logic_cond
[cond
];
1123 cmp
->is_bool
= false;
1125 cmp
->c2
= tcg_const_tl(0);
1126 #ifdef TARGET_SPARC64
1129 cmp
->c1
= tcg_temp_new();
1130 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1135 cmp
->c1
= cpu_cc_dst
;
1142 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1143 goto do_compare_dst_0
;
1145 case 7: /* overflow */
1146 case 15: /* !overflow */
1150 cmp
->cond
= subcc_cond
[cond
];
1151 cmp
->is_bool
= false;
1152 #ifdef TARGET_SPARC64
1154 /* Note that sign-extension works for unsigned compares as
1155 long as both operands are sign-extended. */
1156 cmp
->g1
= cmp
->g2
= false;
1157 cmp
->c1
= tcg_temp_new();
1158 cmp
->c2
= tcg_temp_new();
1159 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1160 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1164 cmp
->g1
= cmp
->g2
= true;
1165 cmp
->c1
= cpu_cc_src
;
1166 cmp
->c2
= cpu_cc_src2
;
1173 gen_helper_compute_psr(cpu_env
);
1174 dc
->cc_op
= CC_OP_FLAGS
;
1178 /* We're going to generate a boolean result. */
1179 cmp
->cond
= TCG_COND_NE
;
1180 cmp
->is_bool
= true;
1181 cmp
->g1
= cmp
->g2
= false;
1182 cmp
->c1
= r_dst
= tcg_temp_new();
1183 cmp
->c2
= tcg_const_tl(0);
1187 gen_op_eval_bn(r_dst
);
1190 gen_op_eval_be(r_dst
, r_src
);
1193 gen_op_eval_ble(r_dst
, r_src
);
1196 gen_op_eval_bl(r_dst
, r_src
);
1199 gen_op_eval_bleu(r_dst
, r_src
);
1202 gen_op_eval_bcs(r_dst
, r_src
);
1205 gen_op_eval_bneg(r_dst
, r_src
);
1208 gen_op_eval_bvs(r_dst
, r_src
);
1211 gen_op_eval_ba(r_dst
);
1214 gen_op_eval_bne(r_dst
, r_src
);
1217 gen_op_eval_bg(r_dst
, r_src
);
1220 gen_op_eval_bge(r_dst
, r_src
);
1223 gen_op_eval_bgu(r_dst
, r_src
);
1226 gen_op_eval_bcc(r_dst
, r_src
);
1229 gen_op_eval_bpos(r_dst
, r_src
);
1232 gen_op_eval_bvc(r_dst
, r_src
);
1239 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1241 unsigned int offset
;
1244 /* For now we still generate a straight boolean result. */
1245 cmp
->cond
= TCG_COND_NE
;
1246 cmp
->is_bool
= true;
1247 cmp
->g1
= cmp
->g2
= false;
1248 cmp
->c1
= r_dst
= tcg_temp_new();
1249 cmp
->c2
= tcg_const_tl(0);
1269 gen_op_eval_bn(r_dst
);
1272 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1275 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1278 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1281 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1284 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1287 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1290 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1293 gen_op_eval_ba(r_dst
);
1296 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1299 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1302 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1305 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1308 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1311 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1314 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1319 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1323 gen_compare(&cmp
, cc
, cond
, dc
);
1325 /* The interface is to return a boolean in r_dst. */
1327 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1329 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1335 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1338 gen_fcompare(&cmp
, cc
, cond
);
1340 /* The interface is to return a boolean in r_dst. */
1342 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1344 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1350 #ifdef TARGET_SPARC64
1352 static const int gen_tcg_cond_reg
[8] = {
1363 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1365 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1366 cmp
->is_bool
= false;
1370 cmp
->c2
= tcg_const_tl(0);
1373 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1376 gen_compare_reg(&cmp
, cond
, r_src
);
1378 /* The interface is to return a boolean in r_dst. */
1379 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1385 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1387 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1388 target_ulong target
= dc
->pc
+ offset
;
1390 #ifdef TARGET_SPARC64
1391 if (unlikely(AM_CHECK(dc
))) {
1392 target
&= 0xffffffffULL
;
1396 /* unconditional not taken */
1398 dc
->pc
= dc
->npc
+ 4;
1399 dc
->npc
= dc
->pc
+ 4;
1402 dc
->npc
= dc
->pc
+ 4;
1404 } else if (cond
== 0x8) {
1405 /* unconditional taken */
1408 dc
->npc
= dc
->pc
+ 4;
1412 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1416 gen_cond(cpu_cond
, cc
, cond
, dc
);
1418 gen_branch_a(dc
, target
);
1420 gen_branch_n(dc
, target
);
1425 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1427 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1428 target_ulong target
= dc
->pc
+ offset
;
1430 #ifdef TARGET_SPARC64
1431 if (unlikely(AM_CHECK(dc
))) {
1432 target
&= 0xffffffffULL
;
1436 /* unconditional not taken */
1438 dc
->pc
= dc
->npc
+ 4;
1439 dc
->npc
= dc
->pc
+ 4;
1442 dc
->npc
= dc
->pc
+ 4;
1444 } else if (cond
== 0x8) {
1445 /* unconditional taken */
1448 dc
->npc
= dc
->pc
+ 4;
1452 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1456 gen_fcond(cpu_cond
, cc
, cond
);
1458 gen_branch_a(dc
, target
);
1460 gen_branch_n(dc
, target
);
1465 #ifdef TARGET_SPARC64
1466 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1469 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1470 target_ulong target
= dc
->pc
+ offset
;
1472 if (unlikely(AM_CHECK(dc
))) {
1473 target
&= 0xffffffffULL
;
1476 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1478 gen_branch_a(dc
, target
);
1480 gen_branch_n(dc
, target
);
1484 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1488 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1491 gen_helper_fcmps_fcc1(cpu_env
, r_rs1
, r_rs2
);
1494 gen_helper_fcmps_fcc2(cpu_env
, r_rs1
, r_rs2
);
1497 gen_helper_fcmps_fcc3(cpu_env
, r_rs1
, r_rs2
);
1502 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1506 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1509 gen_helper_fcmpd_fcc1(cpu_env
, r_rs1
, r_rs2
);
1512 gen_helper_fcmpd_fcc2(cpu_env
, r_rs1
, r_rs2
);
1515 gen_helper_fcmpd_fcc3(cpu_env
, r_rs1
, r_rs2
);
1520 static inline void gen_op_fcmpq(int fccno
)
1524 gen_helper_fcmpq(cpu_env
);
1527 gen_helper_fcmpq_fcc1(cpu_env
);
1530 gen_helper_fcmpq_fcc2(cpu_env
);
1533 gen_helper_fcmpq_fcc3(cpu_env
);
1538 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1542 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1545 gen_helper_fcmpes_fcc1(cpu_env
, r_rs1
, r_rs2
);
1548 gen_helper_fcmpes_fcc2(cpu_env
, r_rs1
, r_rs2
);
1551 gen_helper_fcmpes_fcc3(cpu_env
, r_rs1
, r_rs2
);
1556 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1560 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1563 gen_helper_fcmped_fcc1(cpu_env
, r_rs1
, r_rs2
);
1566 gen_helper_fcmped_fcc2(cpu_env
, r_rs1
, r_rs2
);
1569 gen_helper_fcmped_fcc3(cpu_env
, r_rs1
, r_rs2
);
1574 static inline void gen_op_fcmpeq(int fccno
)
1578 gen_helper_fcmpeq(cpu_env
);
1581 gen_helper_fcmpeq_fcc1(cpu_env
);
1584 gen_helper_fcmpeq_fcc2(cpu_env
);
1587 gen_helper_fcmpeq_fcc3(cpu_env
);
1594 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1596 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1599 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1601 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1604 static inline void gen_op_fcmpq(int fccno
)
1606 gen_helper_fcmpq(cpu_env
);
1609 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1611 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1614 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1616 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1619 static inline void gen_op_fcmpeq(int fccno
)
1621 gen_helper_fcmpeq(cpu_env
);
1625 static inline void gen_op_fpexception_im(int fsr_flags
)
1629 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1630 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1631 r_const
= tcg_const_i32(TT_FP_EXCP
);
1632 gen_helper_raise_exception(cpu_env
, r_const
);
1633 tcg_temp_free_i32(r_const
);
1636 static int gen_trap_ifnofpu(DisasContext
*dc
)
1638 #if !defined(CONFIG_USER_ONLY)
1639 if (!dc
->fpu_enabled
) {
1643 r_const
= tcg_const_i32(TT_NFPU_INSN
);
1644 gen_helper_raise_exception(cpu_env
, r_const
);
1645 tcg_temp_free_i32(r_const
);
1653 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1655 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1658 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1659 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1663 src
= gen_load_fpr_F(dc
, rs
);
1664 dst
= gen_dest_fpr_F(dc
);
1666 gen(dst
, cpu_env
, src
);
1668 gen_store_fpr_F(dc
, rd
, dst
);
1671 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1672 void (*gen
)(TCGv_i32
, TCGv_i32
))
1676 src
= gen_load_fpr_F(dc
, rs
);
1677 dst
= gen_dest_fpr_F(dc
);
1681 gen_store_fpr_F(dc
, rd
, dst
);
1684 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1685 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1687 TCGv_i32 dst
, src1
, src2
;
1689 src1
= gen_load_fpr_F(dc
, rs1
);
1690 src2
= gen_load_fpr_F(dc
, rs2
);
1691 dst
= gen_dest_fpr_F(dc
);
1693 gen(dst
, cpu_env
, src1
, src2
);
1695 gen_store_fpr_F(dc
, rd
, dst
);
1698 #ifdef TARGET_SPARC64
1699 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1700 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1702 TCGv_i32 dst
, src1
, src2
;
1704 src1
= gen_load_fpr_F(dc
, rs1
);
1705 src2
= gen_load_fpr_F(dc
, rs2
);
1706 dst
= gen_dest_fpr_F(dc
);
1708 gen(dst
, src1
, src2
);
1710 gen_store_fpr_F(dc
, rd
, dst
);
1714 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1715 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1719 src
= gen_load_fpr_D(dc
, rs
);
1720 dst
= gen_dest_fpr_D(dc
, rd
);
1722 gen(dst
, cpu_env
, src
);
1724 gen_store_fpr_D(dc
, rd
, dst
);
1727 #ifdef TARGET_SPARC64
1728 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1729 void (*gen
)(TCGv_i64
, TCGv_i64
))
1733 src
= gen_load_fpr_D(dc
, rs
);
1734 dst
= gen_dest_fpr_D(dc
, rd
);
1738 gen_store_fpr_D(dc
, rd
, dst
);
1742 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1743 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1745 TCGv_i64 dst
, src1
, src2
;
1747 src1
= gen_load_fpr_D(dc
, rs1
);
1748 src2
= gen_load_fpr_D(dc
, rs2
);
1749 dst
= gen_dest_fpr_D(dc
, rd
);
1751 gen(dst
, cpu_env
, src1
, src2
);
1753 gen_store_fpr_D(dc
, rd
, dst
);
1756 #ifdef TARGET_SPARC64
1757 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1758 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1760 TCGv_i64 dst
, src1
, src2
;
1762 src1
= gen_load_fpr_D(dc
, rs1
);
1763 src2
= gen_load_fpr_D(dc
, rs2
);
1764 dst
= gen_dest_fpr_D(dc
, rd
);
1766 gen(dst
, src1
, src2
);
1768 gen_store_fpr_D(dc
, rd
, dst
);
1771 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1772 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1774 TCGv_i64 dst
, src1
, src2
;
1776 src1
= gen_load_fpr_D(dc
, rs1
);
1777 src2
= gen_load_fpr_D(dc
, rs2
);
1778 dst
= gen_dest_fpr_D(dc
, rd
);
1780 gen(dst
, cpu_gsr
, src1
, src2
);
1782 gen_store_fpr_D(dc
, rd
, dst
);
1785 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1786 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1788 TCGv_i64 dst
, src0
, src1
, src2
;
1790 src1
= gen_load_fpr_D(dc
, rs1
);
1791 src2
= gen_load_fpr_D(dc
, rs2
);
1792 src0
= gen_load_fpr_D(dc
, rd
);
1793 dst
= gen_dest_fpr_D(dc
, rd
);
1795 gen(dst
, src0
, src1
, src2
);
1797 gen_store_fpr_D(dc
, rd
, dst
);
1801 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1802 void (*gen
)(TCGv_ptr
))
1804 gen_op_load_fpr_QT1(QFPREG(rs
));
1808 gen_op_store_QT0_fpr(QFPREG(rd
));
1809 gen_update_fprs_dirty(QFPREG(rd
));
1812 #ifdef TARGET_SPARC64
1813 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1814 void (*gen
)(TCGv_ptr
))
1816 gen_op_load_fpr_QT1(QFPREG(rs
));
1820 gen_op_store_QT0_fpr(QFPREG(rd
));
1821 gen_update_fprs_dirty(QFPREG(rd
));
1825 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1826 void (*gen
)(TCGv_ptr
))
1828 gen_op_load_fpr_QT0(QFPREG(rs1
));
1829 gen_op_load_fpr_QT1(QFPREG(rs2
));
1833 gen_op_store_QT0_fpr(QFPREG(rd
));
1834 gen_update_fprs_dirty(QFPREG(rd
));
1837 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1838 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1841 TCGv_i32 src1
, src2
;
1843 src1
= gen_load_fpr_F(dc
, rs1
);
1844 src2
= gen_load_fpr_F(dc
, rs2
);
1845 dst
= gen_dest_fpr_D(dc
, rd
);
1847 gen(dst
, cpu_env
, src1
, src2
);
1849 gen_store_fpr_D(dc
, rd
, dst
);
1852 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1853 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1855 TCGv_i64 src1
, src2
;
1857 src1
= gen_load_fpr_D(dc
, rs1
);
1858 src2
= gen_load_fpr_D(dc
, rs2
);
1860 gen(cpu_env
, src1
, src2
);
1862 gen_op_store_QT0_fpr(QFPREG(rd
));
1863 gen_update_fprs_dirty(QFPREG(rd
));
1866 #ifdef TARGET_SPARC64
1867 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1868 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1873 src
= gen_load_fpr_F(dc
, rs
);
1874 dst
= gen_dest_fpr_D(dc
, rd
);
1876 gen(dst
, cpu_env
, src
);
1878 gen_store_fpr_D(dc
, rd
, dst
);
1882 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1883 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1888 src
= gen_load_fpr_F(dc
, rs
);
1889 dst
= gen_dest_fpr_D(dc
, rd
);
1891 gen(dst
, cpu_env
, src
);
1893 gen_store_fpr_D(dc
, rd
, dst
);
1896 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1897 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1902 src
= gen_load_fpr_D(dc
, rs
);
1903 dst
= gen_dest_fpr_F(dc
);
1905 gen(dst
, cpu_env
, src
);
1907 gen_store_fpr_F(dc
, rd
, dst
);
1910 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1911 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1915 gen_op_load_fpr_QT1(QFPREG(rs
));
1916 dst
= gen_dest_fpr_F(dc
);
1920 gen_store_fpr_F(dc
, rd
, dst
);
1923 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1924 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1928 gen_op_load_fpr_QT1(QFPREG(rs
));
1929 dst
= gen_dest_fpr_D(dc
, rd
);
1933 gen_store_fpr_D(dc
, rd
, dst
);
1936 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1937 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1941 src
= gen_load_fpr_F(dc
, rs
);
1945 gen_op_store_QT0_fpr(QFPREG(rd
));
1946 gen_update_fprs_dirty(QFPREG(rd
));
1949 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1950 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1954 src
= gen_load_fpr_D(dc
, rs
);
1958 gen_op_store_QT0_fpr(QFPREG(rd
));
1959 gen_update_fprs_dirty(QFPREG(rd
));
1963 #ifdef TARGET_SPARC64
1964 static inline TCGv_i32
gen_get_asi(int insn
, TCGv r_addr
)
1970 r_asi
= tcg_temp_new_i32();
1971 tcg_gen_mov_i32(r_asi
, cpu_asi
);
1973 asi
= GET_FIELD(insn
, 19, 26);
1974 r_asi
= tcg_const_i32(asi
);
1979 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
1982 TCGv_i32 r_asi
, r_size
, r_sign
;
1984 r_asi
= gen_get_asi(insn
, addr
);
1985 r_size
= tcg_const_i32(size
);
1986 r_sign
= tcg_const_i32(sign
);
1987 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
1988 tcg_temp_free_i32(r_sign
);
1989 tcg_temp_free_i32(r_size
);
1990 tcg_temp_free_i32(r_asi
);
1993 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
1995 TCGv_i32 r_asi
, r_size
;
1997 r_asi
= gen_get_asi(insn
, addr
);
1998 r_size
= tcg_const_i32(size
);
1999 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2000 tcg_temp_free_i32(r_size
);
2001 tcg_temp_free_i32(r_asi
);
2004 static inline void gen_ldf_asi(TCGv addr
, int insn
, int size
, int rd
)
2006 TCGv_i32 r_asi
, r_size
, r_rd
;
2008 r_asi
= gen_get_asi(insn
, addr
);
2009 r_size
= tcg_const_i32(size
);
2010 r_rd
= tcg_const_i32(rd
);
2011 gen_helper_ldf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2012 tcg_temp_free_i32(r_rd
);
2013 tcg_temp_free_i32(r_size
);
2014 tcg_temp_free_i32(r_asi
);
2017 static inline void gen_stf_asi(TCGv addr
, int insn
, int size
, int rd
)
2019 TCGv_i32 r_asi
, r_size
, r_rd
;
2021 r_asi
= gen_get_asi(insn
, addr
);
2022 r_size
= tcg_const_i32(size
);
2023 r_rd
= tcg_const_i32(rd
);
2024 gen_helper_stf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2025 tcg_temp_free_i32(r_rd
);
2026 tcg_temp_free_i32(r_size
);
2027 tcg_temp_free_i32(r_asi
);
2030 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2032 TCGv_i32 r_asi
, r_size
, r_sign
;
2033 TCGv_i64 t64
= tcg_temp_new_i64();
2035 r_asi
= gen_get_asi(insn
, addr
);
2036 r_size
= tcg_const_i32(4);
2037 r_sign
= tcg_const_i32(0);
2038 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2039 tcg_temp_free_i32(r_sign
);
2040 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2041 tcg_temp_free_i32(r_size
);
2042 tcg_temp_free_i32(r_asi
);
2043 tcg_gen_trunc_i64_tl(dst
, t64
);
2044 tcg_temp_free_i64(t64
);
2047 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2050 TCGv_i32 r_asi
, r_rd
;
2052 r_asi
= gen_get_asi(insn
, addr
);
2053 r_rd
= tcg_const_i32(rd
);
2054 gen_helper_ldda_asi(cpu_env
, addr
, r_asi
, r_rd
);
2055 tcg_temp_free_i32(r_rd
);
2056 tcg_temp_free_i32(r_asi
);
2059 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2062 TCGv_i32 r_asi
, r_size
;
2063 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2064 TCGv_i64 t64
= tcg_temp_new_i64();
2066 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2067 r_asi
= gen_get_asi(insn
, addr
);
2068 r_size
= tcg_const_i32(8);
2069 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2070 tcg_temp_free_i32(r_size
);
2071 tcg_temp_free_i32(r_asi
);
2072 tcg_temp_free_i64(t64
);
2075 static inline void gen_casx_asi(DisasContext
*dc
, TCGv addr
,
2076 TCGv val2
, int insn
, int rd
)
2078 TCGv val1
= gen_load_gpr(dc
, rd
);
2079 TCGv dst
= gen_dest_gpr(dc
, rd
);
2080 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2082 gen_helper_casx_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2083 tcg_temp_free_i32(r_asi
);
2084 gen_store_gpr(dc
, rd
, dst
);
2087 #elif !defined(CONFIG_USER_ONLY)
2089 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
2092 TCGv_i32 r_asi
, r_size
, r_sign
;
2093 TCGv_i64 t64
= tcg_temp_new_i64();
2095 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2096 r_size
= tcg_const_i32(size
);
2097 r_sign
= tcg_const_i32(sign
);
2098 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2099 tcg_temp_free_i32(r_sign
);
2100 tcg_temp_free_i32(r_size
);
2101 tcg_temp_free_i32(r_asi
);
2102 tcg_gen_trunc_i64_tl(dst
, t64
);
2103 tcg_temp_free_i64(t64
);
2106 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2108 TCGv_i32 r_asi
, r_size
;
2109 TCGv_i64 t64
= tcg_temp_new_i64();
2111 tcg_gen_extu_tl_i64(t64
, src
);
2112 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2113 r_size
= tcg_const_i32(size
);
2114 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2115 tcg_temp_free_i32(r_size
);
2116 tcg_temp_free_i32(r_asi
);
2117 tcg_temp_free_i64(t64
);
2120 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2122 TCGv_i32 r_asi
, r_size
, r_sign
;
2123 TCGv_i64 r_val
, t64
;
2125 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2126 r_size
= tcg_const_i32(4);
2127 r_sign
= tcg_const_i32(0);
2128 t64
= tcg_temp_new_i64();
2129 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2130 tcg_temp_free(r_sign
);
2131 r_val
= tcg_temp_new_i64();
2132 tcg_gen_extu_tl_i64(r_val
, src
);
2133 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2134 tcg_temp_free_i64(r_val
);
2135 tcg_temp_free_i32(r_size
);
2136 tcg_temp_free_i32(r_asi
);
2137 tcg_gen_trunc_i64_tl(dst
, t64
);
2138 tcg_temp_free_i64(t64
);
2141 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2144 TCGv_i32 r_asi
, r_size
, r_sign
;
2148 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2149 r_size
= tcg_const_i32(8);
2150 r_sign
= tcg_const_i32(0);
2151 t64
= tcg_temp_new_i64();
2152 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2153 tcg_temp_free_i32(r_sign
);
2154 tcg_temp_free_i32(r_size
);
2155 tcg_temp_free_i32(r_asi
);
2157 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2158 whereby "rd + 1" elicits "error: array subscript is above array".
2159 Since we have already asserted that rd is even, the semantics
2161 t
= gen_dest_gpr(dc
, rd
| 1);
2162 tcg_gen_trunc_i64_tl(t
, t64
);
2163 gen_store_gpr(dc
, rd
| 1, t
);
2165 tcg_gen_shri_i64(t64
, t64
, 32);
2166 tcg_gen_trunc_i64_tl(hi
, t64
);
2167 tcg_temp_free_i64(t64
);
2168 gen_store_gpr(dc
, rd
, hi
);
2171 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2174 TCGv_i32 r_asi
, r_size
;
2175 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2176 TCGv_i64 t64
= tcg_temp_new_i64();
2178 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2179 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2180 r_size
= tcg_const_i32(8);
2181 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2182 tcg_temp_free_i32(r_size
);
2183 tcg_temp_free_i32(r_asi
);
2184 tcg_temp_free_i64(t64
);
2188 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2189 static inline void gen_cas_asi(DisasContext
*dc
, TCGv addr
,
2190 TCGv val2
, int insn
, int rd
)
2192 TCGv val1
= gen_load_gpr(dc
, rd
);
2193 TCGv dst
= gen_dest_gpr(dc
, rd
);
2194 #ifdef TARGET_SPARC64
2195 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2197 TCGv_i32 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2200 gen_helper_cas_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2201 tcg_temp_free_i32(r_asi
);
2202 gen_store_gpr(dc
, rd
, dst
);
2205 static inline void gen_ldstub_asi(TCGv dst
, TCGv addr
, int insn
)
2208 TCGv_i32 r_asi
, r_size
;
2210 gen_ld_asi(dst
, addr
, insn
, 1, 0);
2212 r_val
= tcg_const_i64(0xffULL
);
2213 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2214 r_size
= tcg_const_i32(1);
2215 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2216 tcg_temp_free_i32(r_size
);
2217 tcg_temp_free_i32(r_asi
);
2218 tcg_temp_free_i64(r_val
);
2222 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2224 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2225 return gen_load_gpr(dc
, rs1
);
2228 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2230 if (IS_IMM
) { /* immediate */
2231 target_long simm
= GET_FIELDs(insn
, 19, 31);
2232 TCGv t
= get_temp_tl(dc
);
2233 tcg_gen_movi_tl(t
, simm
);
2235 } else { /* register */
2236 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2237 return gen_load_gpr(dc
, rs2
);
2241 #ifdef TARGET_SPARC64
2242 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2244 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2246 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2247 or fold the comparison down to 32 bits and use movcond_i32. Choose
2249 c32
= tcg_temp_new_i32();
2251 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2253 TCGv_i64 c64
= tcg_temp_new_i64();
2254 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2255 tcg_gen_extrl_i64_i32(c32
, c64
);
2256 tcg_temp_free_i64(c64
);
2259 s1
= gen_load_fpr_F(dc
, rs
);
2260 s2
= gen_load_fpr_F(dc
, rd
);
2261 dst
= gen_dest_fpr_F(dc
);
2262 zero
= tcg_const_i32(0);
2264 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2266 tcg_temp_free_i32(c32
);
2267 tcg_temp_free_i32(zero
);
2268 gen_store_fpr_F(dc
, rd
, dst
);
2271 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2273 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2274 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2275 gen_load_fpr_D(dc
, rs
),
2276 gen_load_fpr_D(dc
, rd
));
2277 gen_store_fpr_D(dc
, rd
, dst
);
2280 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2282 int qd
= QFPREG(rd
);
2283 int qs
= QFPREG(rs
);
2285 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2286 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2287 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2288 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2290 gen_update_fprs_dirty(qd
);
2293 #ifndef CONFIG_USER_ONLY
2294 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_ptr cpu_env
)
2296 TCGv_i32 r_tl
= tcg_temp_new_i32();
2298 /* load env->tl into r_tl */
2299 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2301 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2302 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2304 /* calculate offset to current trap state from env->ts, reuse r_tl */
2305 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2306 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2308 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2310 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2311 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2312 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2313 tcg_temp_free_ptr(r_tl_tmp
);
2316 tcg_temp_free_i32(r_tl
);
2320 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2321 int width
, bool cc
, bool left
)
2323 TCGv lo1
, lo2
, t1
, t2
;
2324 uint64_t amask
, tabl
, tabr
;
2325 int shift
, imask
, omask
;
2328 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2329 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2330 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2331 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2332 dc
->cc_op
= CC_OP_SUB
;
2335 /* Theory of operation: there are two tables, left and right (not to
2336 be confused with the left and right versions of the opcode). These
2337 are indexed by the low 3 bits of the inputs. To make things "easy",
2338 these tables are loaded into two constants, TABL and TABR below.
2339 The operation index = (input & imask) << shift calculates the index
2340 into the constant, while val = (table >> index) & omask calculates
2341 the value we're looking for. */
2348 tabl
= 0x80c0e0f0f8fcfeffULL
;
2349 tabr
= 0xff7f3f1f0f070301ULL
;
2351 tabl
= 0x0103070f1f3f7fffULL
;
2352 tabr
= 0xfffefcf8f0e0c080ULL
;
2372 tabl
= (2 << 2) | 3;
2373 tabr
= (3 << 2) | 1;
2375 tabl
= (1 << 2) | 3;
2376 tabr
= (3 << 2) | 2;
2383 lo1
= tcg_temp_new();
2384 lo2
= tcg_temp_new();
2385 tcg_gen_andi_tl(lo1
, s1
, imask
);
2386 tcg_gen_andi_tl(lo2
, s2
, imask
);
2387 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2388 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2390 t1
= tcg_const_tl(tabl
);
2391 t2
= tcg_const_tl(tabr
);
2392 tcg_gen_shr_tl(lo1
, t1
, lo1
);
2393 tcg_gen_shr_tl(lo2
, t2
, lo2
);
2394 tcg_gen_andi_tl(dst
, lo1
, omask
);
2395 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2399 amask
&= 0xffffffffULL
;
2401 tcg_gen_andi_tl(s1
, s1
, amask
);
2402 tcg_gen_andi_tl(s2
, s2
, amask
);
2404 /* We want to compute
2405 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2406 We've already done dst = lo1, so this reduces to
2407 dst &= (s1 == s2 ? -1 : lo2)
2412 tcg_gen_setcond_tl(TCG_COND_EQ
, t1
, s1
, s2
);
2413 tcg_gen_neg_tl(t1
, t1
);
2414 tcg_gen_or_tl(lo2
, lo2
, t1
);
2415 tcg_gen_and_tl(dst
, dst
, lo2
);
2423 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2425 TCGv tmp
= tcg_temp_new();
2427 tcg_gen_add_tl(tmp
, s1
, s2
);
2428 tcg_gen_andi_tl(dst
, tmp
, -8);
2430 tcg_gen_neg_tl(tmp
, tmp
);
2432 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2437 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2441 t1
= tcg_temp_new();
2442 t2
= tcg_temp_new();
2443 shift
= tcg_temp_new();
2445 tcg_gen_andi_tl(shift
, gsr
, 7);
2446 tcg_gen_shli_tl(shift
, shift
, 3);
2447 tcg_gen_shl_tl(t1
, s1
, shift
);
2449 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2450 shift of (up to 63) followed by a constant shift of 1. */
2451 tcg_gen_xori_tl(shift
, shift
, 63);
2452 tcg_gen_shr_tl(t2
, s2
, shift
);
2453 tcg_gen_shri_tl(t2
, t2
, 1);
2455 tcg_gen_or_tl(dst
, t1
, t2
);
2459 tcg_temp_free(shift
);
2463 #define CHECK_IU_FEATURE(dc, FEATURE) \
2464 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2466 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2467 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2470 /* before an instruction, dc->pc must be static */
2471 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2473 unsigned int opc
, rs1
, rs2
, rd
;
2474 TCGv cpu_src1
, cpu_src2
;
2475 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2476 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2479 opc
= GET_FIELD(insn
, 0, 1);
2480 rd
= GET_FIELD(insn
, 2, 6);
2483 case 0: /* branches/sethi */
2485 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2488 #ifdef TARGET_SPARC64
2489 case 0x1: /* V9 BPcc */
2493 target
= GET_FIELD_SP(insn
, 0, 18);
2494 target
= sign_extend(target
, 19);
2496 cc
= GET_FIELD_SP(insn
, 20, 21);
2498 do_branch(dc
, target
, insn
, 0);
2500 do_branch(dc
, target
, insn
, 1);
2505 case 0x3: /* V9 BPr */
2507 target
= GET_FIELD_SP(insn
, 0, 13) |
2508 (GET_FIELD_SP(insn
, 20, 21) << 14);
2509 target
= sign_extend(target
, 16);
2511 cpu_src1
= get_src1(dc
, insn
);
2512 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2515 case 0x5: /* V9 FBPcc */
2517 int cc
= GET_FIELD_SP(insn
, 20, 21);
2518 if (gen_trap_ifnofpu(dc
)) {
2521 target
= GET_FIELD_SP(insn
, 0, 18);
2522 target
= sign_extend(target
, 19);
2524 do_fbranch(dc
, target
, insn
, cc
);
2528 case 0x7: /* CBN+x */
2533 case 0x2: /* BN+x */
2535 target
= GET_FIELD(insn
, 10, 31);
2536 target
= sign_extend(target
, 22);
2538 do_branch(dc
, target
, insn
, 0);
2541 case 0x6: /* FBN+x */
2543 if (gen_trap_ifnofpu(dc
)) {
2546 target
= GET_FIELD(insn
, 10, 31);
2547 target
= sign_extend(target
, 22);
2549 do_fbranch(dc
, target
, insn
, 0);
2552 case 0x4: /* SETHI */
2553 /* Special-case %g0 because that's the canonical nop. */
2555 uint32_t value
= GET_FIELD(insn
, 10, 31);
2556 TCGv t
= gen_dest_gpr(dc
, rd
);
2557 tcg_gen_movi_tl(t
, value
<< 10);
2558 gen_store_gpr(dc
, rd
, t
);
2561 case 0x0: /* UNIMPL */
2570 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2571 TCGv o7
= gen_dest_gpr(dc
, 15);
2573 tcg_gen_movi_tl(o7
, dc
->pc
);
2574 gen_store_gpr(dc
, 15, o7
);
2577 #ifdef TARGET_SPARC64
2578 if (unlikely(AM_CHECK(dc
))) {
2579 target
&= 0xffffffffULL
;
2585 case 2: /* FPU & Logical Operations */
2587 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2588 TCGv cpu_dst
= get_temp_tl(dc
);
2591 if (xop
== 0x3a) { /* generate trap */
2592 int cond
= GET_FIELD(insn
, 3, 6);
2594 TCGLabel
*l1
= NULL
;
2605 /* Conditional trap. */
2607 #ifdef TARGET_SPARC64
2609 int cc
= GET_FIELD_SP(insn
, 11, 12);
2611 gen_compare(&cmp
, 0, cond
, dc
);
2612 } else if (cc
== 2) {
2613 gen_compare(&cmp
, 1, cond
, dc
);
2618 gen_compare(&cmp
, 0, cond
, dc
);
2620 l1
= gen_new_label();
2621 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2622 cmp
.c1
, cmp
.c2
, l1
);
2626 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2627 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2629 /* Don't use the normal temporaries, as they may well have
2630 gone out of scope with the branch above. While we're
2631 doing that we might as well pre-truncate to 32-bit. */
2632 trap
= tcg_temp_new_i32();
2634 rs1
= GET_FIELD_SP(insn
, 14, 18);
2636 rs2
= GET_FIELD_SP(insn
, 0, 6);
2638 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2639 /* Signal that the trap value is fully constant. */
2642 TCGv t1
= gen_load_gpr(dc
, rs1
);
2643 tcg_gen_trunc_tl_i32(trap
, t1
);
2644 tcg_gen_addi_i32(trap
, trap
, rs2
);
2648 rs2
= GET_FIELD_SP(insn
, 0, 4);
2649 t1
= gen_load_gpr(dc
, rs1
);
2650 t2
= gen_load_gpr(dc
, rs2
);
2651 tcg_gen_add_tl(t1
, t1
, t2
);
2652 tcg_gen_trunc_tl_i32(trap
, t1
);
2655 tcg_gen_andi_i32(trap
, trap
, mask
);
2656 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2659 gen_helper_raise_exception(cpu_env
, trap
);
2660 tcg_temp_free_i32(trap
);
2663 /* An unconditional trap ends the TB. */
2667 /* A conditional trap falls through to the next insn. */
2671 } else if (xop
== 0x28) {
2672 rs1
= GET_FIELD(insn
, 13, 17);
2675 #ifndef TARGET_SPARC64
2676 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2677 manual, rdy on the microSPARC
2679 case 0x0f: /* stbar in the SPARCv8 manual,
2680 rdy on the microSPARC II */
2681 case 0x10 ... 0x1f: /* implementation-dependent in the
2682 SPARCv8 manual, rdy on the
2685 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
2686 TCGv t
= gen_dest_gpr(dc
, rd
);
2687 /* Read Asr17 for a Leon3 monoprocessor */
2688 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
2689 gen_store_gpr(dc
, rd
, t
);
2693 gen_store_gpr(dc
, rd
, cpu_y
);
2695 #ifdef TARGET_SPARC64
2696 case 0x2: /* V9 rdccr */
2698 gen_helper_rdccr(cpu_dst
, cpu_env
);
2699 gen_store_gpr(dc
, rd
, cpu_dst
);
2701 case 0x3: /* V9 rdasi */
2702 tcg_gen_ext_i32_tl(cpu_dst
, cpu_asi
);
2703 gen_store_gpr(dc
, rd
, cpu_dst
);
2705 case 0x4: /* V9 rdtick */
2710 r_tickptr
= tcg_temp_new_ptr();
2711 r_const
= tcg_const_i32(dc
->mem_idx
);
2712 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2713 offsetof(CPUSPARCState
, tick
));
2714 gen_helper_tick_get_count(cpu_dst
, cpu_env
, r_tickptr
,
2716 tcg_temp_free_ptr(r_tickptr
);
2717 tcg_temp_free_i32(r_const
);
2718 gen_store_gpr(dc
, rd
, cpu_dst
);
2721 case 0x5: /* V9 rdpc */
2723 TCGv t
= gen_dest_gpr(dc
, rd
);
2724 if (unlikely(AM_CHECK(dc
))) {
2725 tcg_gen_movi_tl(t
, dc
->pc
& 0xffffffffULL
);
2727 tcg_gen_movi_tl(t
, dc
->pc
);
2729 gen_store_gpr(dc
, rd
, t
);
2732 case 0x6: /* V9 rdfprs */
2733 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
2734 gen_store_gpr(dc
, rd
, cpu_dst
);
2736 case 0xf: /* V9 membar */
2737 break; /* no effect */
2738 case 0x13: /* Graphics Status */
2739 if (gen_trap_ifnofpu(dc
)) {
2742 gen_store_gpr(dc
, rd
, cpu_gsr
);
2744 case 0x16: /* Softint */
2745 tcg_gen_ext_i32_tl(cpu_dst
, cpu_softint
);
2746 gen_store_gpr(dc
, rd
, cpu_dst
);
2748 case 0x17: /* Tick compare */
2749 gen_store_gpr(dc
, rd
, cpu_tick_cmpr
);
2751 case 0x18: /* System tick */
2756 r_tickptr
= tcg_temp_new_ptr();
2757 r_const
= tcg_const_i32(dc
->mem_idx
);
2758 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2759 offsetof(CPUSPARCState
, stick
));
2760 gen_helper_tick_get_count(cpu_dst
, cpu_env
, r_tickptr
,
2762 tcg_temp_free_ptr(r_tickptr
);
2763 tcg_temp_free_i32(r_const
);
2764 gen_store_gpr(dc
, rd
, cpu_dst
);
2767 case 0x19: /* System tick compare */
2768 gen_store_gpr(dc
, rd
, cpu_stick_cmpr
);
2770 case 0x10: /* Performance Control */
2771 case 0x11: /* Performance Instrumentation Counter */
2772 case 0x12: /* Dispatch Control */
2773 case 0x14: /* Softint set, WO */
2774 case 0x15: /* Softint clear, WO */
2779 #if !defined(CONFIG_USER_ONLY)
2780 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
2781 #ifndef TARGET_SPARC64
2782 if (!supervisor(dc
)) {
2786 gen_helper_rdpsr(cpu_dst
, cpu_env
);
2788 CHECK_IU_FEATURE(dc
, HYPV
);
2789 if (!hypervisor(dc
))
2791 rs1
= GET_FIELD(insn
, 13, 17);
2794 // gen_op_rdhpstate();
2797 // gen_op_rdhtstate();
2800 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
2803 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
2806 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
2808 case 31: // hstick_cmpr
2809 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
2815 gen_store_gpr(dc
, rd
, cpu_dst
);
2817 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
2818 if (!supervisor(dc
)) {
2821 cpu_tmp0
= get_temp_tl(dc
);
2822 #ifdef TARGET_SPARC64
2823 rs1
= GET_FIELD(insn
, 13, 17);
2829 r_tsptr
= tcg_temp_new_ptr();
2830 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2831 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2832 offsetof(trap_state
, tpc
));
2833 tcg_temp_free_ptr(r_tsptr
);
2840 r_tsptr
= tcg_temp_new_ptr();
2841 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2842 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2843 offsetof(trap_state
, tnpc
));
2844 tcg_temp_free_ptr(r_tsptr
);
2851 r_tsptr
= tcg_temp_new_ptr();
2852 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2853 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2854 offsetof(trap_state
, tstate
));
2855 tcg_temp_free_ptr(r_tsptr
);
2860 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2862 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2863 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
2864 offsetof(trap_state
, tt
));
2865 tcg_temp_free_ptr(r_tsptr
);
2873 r_tickptr
= tcg_temp_new_ptr();
2874 r_const
= tcg_const_i32(dc
->mem_idx
);
2875 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2876 offsetof(CPUSPARCState
, tick
));
2877 gen_helper_tick_get_count(cpu_tmp0
, cpu_env
,
2878 r_tickptr
, r_const
);
2879 tcg_temp_free_ptr(r_tickptr
);
2880 tcg_temp_free_i32(r_const
);
2884 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
2887 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2888 offsetof(CPUSPARCState
, pstate
));
2891 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2892 offsetof(CPUSPARCState
, tl
));
2895 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2896 offsetof(CPUSPARCState
, psrpil
));
2899 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
2902 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2903 offsetof(CPUSPARCState
, cansave
));
2905 case 11: // canrestore
2906 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2907 offsetof(CPUSPARCState
, canrestore
));
2909 case 12: // cleanwin
2910 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2911 offsetof(CPUSPARCState
, cleanwin
));
2913 case 13: // otherwin
2914 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2915 offsetof(CPUSPARCState
, otherwin
));
2918 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2919 offsetof(CPUSPARCState
, wstate
));
2921 case 16: // UA2005 gl
2922 CHECK_IU_FEATURE(dc
, GL
);
2923 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2924 offsetof(CPUSPARCState
, gl
));
2926 case 26: // UA2005 strand status
2927 CHECK_IU_FEATURE(dc
, HYPV
);
2928 if (!hypervisor(dc
))
2930 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
2933 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
2940 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
2942 gen_store_gpr(dc
, rd
, cpu_tmp0
);
2944 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
2945 #ifdef TARGET_SPARC64
2947 gen_helper_flushw(cpu_env
);
2949 if (!supervisor(dc
))
2951 gen_store_gpr(dc
, rd
, cpu_tbr
);
2955 } else if (xop
== 0x34) { /* FPU Operations */
2956 if (gen_trap_ifnofpu(dc
)) {
2959 gen_op_clear_ieee_excp_and_FTT();
2960 rs1
= GET_FIELD(insn
, 13, 17);
2961 rs2
= GET_FIELD(insn
, 27, 31);
2962 xop
= GET_FIELD(insn
, 18, 26);
2965 case 0x1: /* fmovs */
2966 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
2967 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
2969 case 0x5: /* fnegs */
2970 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
2972 case 0x9: /* fabss */
2973 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
2975 case 0x29: /* fsqrts */
2976 CHECK_FPU_FEATURE(dc
, FSQRT
);
2977 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
2979 case 0x2a: /* fsqrtd */
2980 CHECK_FPU_FEATURE(dc
, FSQRT
);
2981 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
2983 case 0x2b: /* fsqrtq */
2984 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2985 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
2987 case 0x41: /* fadds */
2988 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
2990 case 0x42: /* faddd */
2991 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
2993 case 0x43: /* faddq */
2994 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2995 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
2997 case 0x45: /* fsubs */
2998 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
3000 case 0x46: /* fsubd */
3001 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3003 case 0x47: /* fsubq */
3004 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3005 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3007 case 0x49: /* fmuls */
3008 CHECK_FPU_FEATURE(dc
, FMUL
);
3009 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3011 case 0x4a: /* fmuld */
3012 CHECK_FPU_FEATURE(dc
, FMUL
);
3013 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3015 case 0x4b: /* fmulq */
3016 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3017 CHECK_FPU_FEATURE(dc
, FMUL
);
3018 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3020 case 0x4d: /* fdivs */
3021 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3023 case 0x4e: /* fdivd */
3024 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3026 case 0x4f: /* fdivq */
3027 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3028 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3030 case 0x69: /* fsmuld */
3031 CHECK_FPU_FEATURE(dc
, FSMULD
);
3032 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3034 case 0x6e: /* fdmulq */
3035 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3036 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3038 case 0xc4: /* fitos */
3039 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3041 case 0xc6: /* fdtos */
3042 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3044 case 0xc7: /* fqtos */
3045 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3046 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3048 case 0xc8: /* fitod */
3049 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3051 case 0xc9: /* fstod */
3052 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3054 case 0xcb: /* fqtod */
3055 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3056 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3058 case 0xcc: /* fitoq */
3059 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3060 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3062 case 0xcd: /* fstoq */
3063 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3064 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3066 case 0xce: /* fdtoq */
3067 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3068 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3070 case 0xd1: /* fstoi */
3071 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3073 case 0xd2: /* fdtoi */
3074 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3076 case 0xd3: /* fqtoi */
3077 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3078 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3080 #ifdef TARGET_SPARC64
3081 case 0x2: /* V9 fmovd */
3082 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3083 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3085 case 0x3: /* V9 fmovq */
3086 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3087 gen_move_Q(rd
, rs2
);
3089 case 0x6: /* V9 fnegd */
3090 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3092 case 0x7: /* V9 fnegq */
3093 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3094 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3096 case 0xa: /* V9 fabsd */
3097 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3099 case 0xb: /* V9 fabsq */
3100 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3101 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3103 case 0x81: /* V9 fstox */
3104 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3106 case 0x82: /* V9 fdtox */
3107 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3109 case 0x83: /* V9 fqtox */
3110 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3111 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3113 case 0x84: /* V9 fxtos */
3114 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3116 case 0x88: /* V9 fxtod */
3117 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3119 case 0x8c: /* V9 fxtoq */
3120 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3121 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3127 } else if (xop
== 0x35) { /* FPU Operations */
3128 #ifdef TARGET_SPARC64
3131 if (gen_trap_ifnofpu(dc
)) {
3134 gen_op_clear_ieee_excp_and_FTT();
3135 rs1
= GET_FIELD(insn
, 13, 17);
3136 rs2
= GET_FIELD(insn
, 27, 31);
3137 xop
= GET_FIELD(insn
, 18, 26);
3140 #ifdef TARGET_SPARC64
3144 cond = GET_FIELD_SP(insn, 10, 12); \
3145 cpu_src1 = get_src1(dc, insn); \
3146 gen_compare_reg(&cmp, cond, cpu_src1); \
3147 gen_fmov##sz(dc, &cmp, rd, rs2); \
3148 free_compare(&cmp); \
3151 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3154 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3157 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3158 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3165 #ifdef TARGET_SPARC64
3166 #define FMOVCC(fcc, sz) \
3169 cond = GET_FIELD_SP(insn, 14, 17); \
3170 gen_fcompare(&cmp, fcc, cond); \
3171 gen_fmov##sz(dc, &cmp, rd, rs2); \
3172 free_compare(&cmp); \
3175 case 0x001: /* V9 fmovscc %fcc0 */
3178 case 0x002: /* V9 fmovdcc %fcc0 */
3181 case 0x003: /* V9 fmovqcc %fcc0 */
3182 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3185 case 0x041: /* V9 fmovscc %fcc1 */
3188 case 0x042: /* V9 fmovdcc %fcc1 */
3191 case 0x043: /* V9 fmovqcc %fcc1 */
3192 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3195 case 0x081: /* V9 fmovscc %fcc2 */
3198 case 0x082: /* V9 fmovdcc %fcc2 */
3201 case 0x083: /* V9 fmovqcc %fcc2 */
3202 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3205 case 0x0c1: /* V9 fmovscc %fcc3 */
3208 case 0x0c2: /* V9 fmovdcc %fcc3 */
3211 case 0x0c3: /* V9 fmovqcc %fcc3 */
3212 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3216 #define FMOVCC(xcc, sz) \
3219 cond = GET_FIELD_SP(insn, 14, 17); \
3220 gen_compare(&cmp, xcc, cond, dc); \
3221 gen_fmov##sz(dc, &cmp, rd, rs2); \
3222 free_compare(&cmp); \
3225 case 0x101: /* V9 fmovscc %icc */
3228 case 0x102: /* V9 fmovdcc %icc */
3231 case 0x103: /* V9 fmovqcc %icc */
3232 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3235 case 0x181: /* V9 fmovscc %xcc */
3238 case 0x182: /* V9 fmovdcc %xcc */
3241 case 0x183: /* V9 fmovqcc %xcc */
3242 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3247 case 0x51: /* fcmps, V9 %fcc */
3248 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3249 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3250 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3252 case 0x52: /* fcmpd, V9 %fcc */
3253 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3254 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3255 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3257 case 0x53: /* fcmpq, V9 %fcc */
3258 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3259 gen_op_load_fpr_QT0(QFPREG(rs1
));
3260 gen_op_load_fpr_QT1(QFPREG(rs2
));
3261 gen_op_fcmpq(rd
& 3);
3263 case 0x55: /* fcmpes, V9 %fcc */
3264 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3265 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3266 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3268 case 0x56: /* fcmped, V9 %fcc */
3269 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3270 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3271 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3273 case 0x57: /* fcmpeq, V9 %fcc */
3274 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3275 gen_op_load_fpr_QT0(QFPREG(rs1
));
3276 gen_op_load_fpr_QT1(QFPREG(rs2
));
3277 gen_op_fcmpeq(rd
& 3);
3282 } else if (xop
== 0x2) {
3283 TCGv dst
= gen_dest_gpr(dc
, rd
);
3284 rs1
= GET_FIELD(insn
, 13, 17);
3286 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3287 if (IS_IMM
) { /* immediate */
3288 simm
= GET_FIELDs(insn
, 19, 31);
3289 tcg_gen_movi_tl(dst
, simm
);
3290 gen_store_gpr(dc
, rd
, dst
);
3291 } else { /* register */
3292 rs2
= GET_FIELD(insn
, 27, 31);
3294 tcg_gen_movi_tl(dst
, 0);
3295 gen_store_gpr(dc
, rd
, dst
);
3297 cpu_src2
= gen_load_gpr(dc
, rs2
);
3298 gen_store_gpr(dc
, rd
, cpu_src2
);
3302 cpu_src1
= get_src1(dc
, insn
);
3303 if (IS_IMM
) { /* immediate */
3304 simm
= GET_FIELDs(insn
, 19, 31);
3305 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
3306 gen_store_gpr(dc
, rd
, dst
);
3307 } else { /* register */
3308 rs2
= GET_FIELD(insn
, 27, 31);
3310 /* mov shortcut: or x, %g0, y -> mov x, y */
3311 gen_store_gpr(dc
, rd
, cpu_src1
);
3313 cpu_src2
= gen_load_gpr(dc
, rs2
);
3314 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
3315 gen_store_gpr(dc
, rd
, dst
);
3319 #ifdef TARGET_SPARC64
3320 } else if (xop
== 0x25) { /* sll, V9 sllx */
3321 cpu_src1
= get_src1(dc
, insn
);
3322 if (IS_IMM
) { /* immediate */
3323 simm
= GET_FIELDs(insn
, 20, 31);
3324 if (insn
& (1 << 12)) {
3325 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3327 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3329 } else { /* register */
3330 rs2
= GET_FIELD(insn
, 27, 31);
3331 cpu_src2
= gen_load_gpr(dc
, rs2
);
3332 cpu_tmp0
= get_temp_tl(dc
);
3333 if (insn
& (1 << 12)) {
3334 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3336 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3338 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3340 gen_store_gpr(dc
, rd
, cpu_dst
);
3341 } else if (xop
== 0x26) { /* srl, V9 srlx */
3342 cpu_src1
= get_src1(dc
, insn
);
3343 if (IS_IMM
) { /* immediate */
3344 simm
= GET_FIELDs(insn
, 20, 31);
3345 if (insn
& (1 << 12)) {
3346 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3348 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3349 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3351 } else { /* register */
3352 rs2
= GET_FIELD(insn
, 27, 31);
3353 cpu_src2
= gen_load_gpr(dc
, rs2
);
3354 cpu_tmp0
= get_temp_tl(dc
);
3355 if (insn
& (1 << 12)) {
3356 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3357 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3359 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3360 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3361 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3364 gen_store_gpr(dc
, rd
, cpu_dst
);
3365 } else if (xop
== 0x27) { /* sra, V9 srax */
3366 cpu_src1
= get_src1(dc
, insn
);
3367 if (IS_IMM
) { /* immediate */
3368 simm
= GET_FIELDs(insn
, 20, 31);
3369 if (insn
& (1 << 12)) {
3370 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3372 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3373 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3375 } else { /* register */
3376 rs2
= GET_FIELD(insn
, 27, 31);
3377 cpu_src2
= gen_load_gpr(dc
, rs2
);
3378 cpu_tmp0
= get_temp_tl(dc
);
3379 if (insn
& (1 << 12)) {
3380 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3381 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3383 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3384 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3385 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3388 gen_store_gpr(dc
, rd
, cpu_dst
);
3390 } else if (xop
< 0x36) {
3392 cpu_src1
= get_src1(dc
, insn
);
3393 cpu_src2
= get_src2(dc
, insn
);
3394 switch (xop
& ~0x10) {
3397 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3398 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3399 dc
->cc_op
= CC_OP_ADD
;
3401 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3405 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3407 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3408 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3409 dc
->cc_op
= CC_OP_LOGIC
;
3413 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3415 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3416 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3417 dc
->cc_op
= CC_OP_LOGIC
;
3421 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3423 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3424 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3425 dc
->cc_op
= CC_OP_LOGIC
;
3430 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3431 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3432 dc
->cc_op
= CC_OP_SUB
;
3434 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3437 case 0x5: /* andn */
3438 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3440 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3441 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3442 dc
->cc_op
= CC_OP_LOGIC
;
3446 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3448 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3449 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3450 dc
->cc_op
= CC_OP_LOGIC
;
3453 case 0x7: /* xorn */
3454 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3456 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3457 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3458 dc
->cc_op
= CC_OP_LOGIC
;
3461 case 0x8: /* addx, V9 addc */
3462 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3465 #ifdef TARGET_SPARC64
3466 case 0x9: /* V9 mulx */
3467 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3470 case 0xa: /* umul */
3471 CHECK_IU_FEATURE(dc
, MUL
);
3472 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3474 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3475 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3476 dc
->cc_op
= CC_OP_LOGIC
;
3479 case 0xb: /* smul */
3480 CHECK_IU_FEATURE(dc
, MUL
);
3481 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
3483 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3484 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3485 dc
->cc_op
= CC_OP_LOGIC
;
3488 case 0xc: /* subx, V9 subc */
3489 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3492 #ifdef TARGET_SPARC64
3493 case 0xd: /* V9 udivx */
3494 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3497 case 0xe: /* udiv */
3498 CHECK_IU_FEATURE(dc
, DIV
);
3500 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3502 dc
->cc_op
= CC_OP_DIV
;
3504 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
3508 case 0xf: /* sdiv */
3509 CHECK_IU_FEATURE(dc
, DIV
);
3511 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3513 dc
->cc_op
= CC_OP_DIV
;
3515 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
3522 gen_store_gpr(dc
, rd
, cpu_dst
);
3524 cpu_src1
= get_src1(dc
, insn
);
3525 cpu_src2
= get_src2(dc
, insn
);
3527 case 0x20: /* taddcc */
3528 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3529 gen_store_gpr(dc
, rd
, cpu_dst
);
3530 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
3531 dc
->cc_op
= CC_OP_TADD
;
3533 case 0x21: /* tsubcc */
3534 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3535 gen_store_gpr(dc
, rd
, cpu_dst
);
3536 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
3537 dc
->cc_op
= CC_OP_TSUB
;
3539 case 0x22: /* taddcctv */
3540 gen_helper_taddcctv(cpu_dst
, cpu_env
,
3541 cpu_src1
, cpu_src2
);
3542 gen_store_gpr(dc
, rd
, cpu_dst
);
3543 dc
->cc_op
= CC_OP_TADDTV
;
3545 case 0x23: /* tsubcctv */
3546 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
3547 cpu_src1
, cpu_src2
);
3548 gen_store_gpr(dc
, rd
, cpu_dst
);
3549 dc
->cc_op
= CC_OP_TSUBTV
;
3551 case 0x24: /* mulscc */
3553 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
3554 gen_store_gpr(dc
, rd
, cpu_dst
);
3555 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3556 dc
->cc_op
= CC_OP_ADD
;
3558 #ifndef TARGET_SPARC64
3559 case 0x25: /* sll */
3560 if (IS_IMM
) { /* immediate */
3561 simm
= GET_FIELDs(insn
, 20, 31);
3562 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3563 } else { /* register */
3564 cpu_tmp0
= get_temp_tl(dc
);
3565 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3566 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3568 gen_store_gpr(dc
, rd
, cpu_dst
);
3570 case 0x26: /* srl */
3571 if (IS_IMM
) { /* immediate */
3572 simm
= GET_FIELDs(insn
, 20, 31);
3573 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3574 } else { /* register */
3575 cpu_tmp0
= get_temp_tl(dc
);
3576 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3577 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3579 gen_store_gpr(dc
, rd
, cpu_dst
);
3581 case 0x27: /* sra */
3582 if (IS_IMM
) { /* immediate */
3583 simm
= GET_FIELDs(insn
, 20, 31);
3584 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3585 } else { /* register */
3586 cpu_tmp0
= get_temp_tl(dc
);
3587 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3588 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3590 gen_store_gpr(dc
, rd
, cpu_dst
);
3595 cpu_tmp0
= get_temp_tl(dc
);
3598 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3599 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
3601 #ifndef TARGET_SPARC64
3602 case 0x01 ... 0x0f: /* undefined in the
3606 case 0x10 ... 0x1f: /* implementation-dependent
3610 if ((rd
== 0x13) && (dc
->def
->features
&
3611 CPU_FEATURE_POWERDOWN
)) {
3612 /* LEON3 power-down */
3614 gen_helper_power_down(cpu_env
);
3618 case 0x2: /* V9 wrccr */
3619 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3620 gen_helper_wrccr(cpu_env
, cpu_tmp0
);
3621 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3622 dc
->cc_op
= CC_OP_FLAGS
;
3624 case 0x3: /* V9 wrasi */
3625 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3626 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
3627 tcg_gen_trunc_tl_i32(cpu_asi
, cpu_tmp0
);
3629 case 0x6: /* V9 wrfprs */
3630 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3631 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
3637 case 0xf: /* V9 sir, nop if user */
3638 #if !defined(CONFIG_USER_ONLY)
3639 if (supervisor(dc
)) {
3644 case 0x13: /* Graphics Status */
3645 if (gen_trap_ifnofpu(dc
)) {
3648 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
3650 case 0x14: /* Softint set */
3651 if (!supervisor(dc
))
3653 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3654 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
3656 case 0x15: /* Softint clear */
3657 if (!supervisor(dc
))
3659 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3660 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
3662 case 0x16: /* Softint write */
3663 if (!supervisor(dc
))
3665 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3666 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
3668 case 0x17: /* Tick compare */
3669 #if !defined(CONFIG_USER_ONLY)
3670 if (!supervisor(dc
))
3676 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
3678 r_tickptr
= tcg_temp_new_ptr();
3679 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3680 offsetof(CPUSPARCState
, tick
));
3681 gen_helper_tick_set_limit(r_tickptr
,
3683 tcg_temp_free_ptr(r_tickptr
);
3686 case 0x18: /* System tick */
3687 #if !defined(CONFIG_USER_ONLY)
3688 if (!supervisor(dc
))
3694 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
3696 r_tickptr
= tcg_temp_new_ptr();
3697 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3698 offsetof(CPUSPARCState
, stick
));
3699 gen_helper_tick_set_count(r_tickptr
,
3701 tcg_temp_free_ptr(r_tickptr
);
3704 case 0x19: /* System tick compare */
3705 #if !defined(CONFIG_USER_ONLY)
3706 if (!supervisor(dc
))
3712 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
3714 r_tickptr
= tcg_temp_new_ptr();
3715 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3716 offsetof(CPUSPARCState
, stick
));
3717 gen_helper_tick_set_limit(r_tickptr
,
3719 tcg_temp_free_ptr(r_tickptr
);
3723 case 0x10: /* Performance Control */
3724 case 0x11: /* Performance Instrumentation
3726 case 0x12: /* Dispatch Control */
3733 #if !defined(CONFIG_USER_ONLY)
3734 case 0x31: /* wrpsr, V9 saved, restored */
3736 if (!supervisor(dc
))
3738 #ifdef TARGET_SPARC64
3741 gen_helper_saved(cpu_env
);
3744 gen_helper_restored(cpu_env
);
3746 case 2: /* UA2005 allclean */
3747 case 3: /* UA2005 otherw */
3748 case 4: /* UA2005 normalw */
3749 case 5: /* UA2005 invalw */
3755 cpu_tmp0
= get_temp_tl(dc
);
3756 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3757 gen_helper_wrpsr(cpu_env
, cpu_tmp0
);
3758 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3759 dc
->cc_op
= CC_OP_FLAGS
;
3767 case 0x32: /* wrwim, V9 wrpr */
3769 if (!supervisor(dc
))
3771 cpu_tmp0
= get_temp_tl(dc
);
3772 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3773 #ifdef TARGET_SPARC64
3779 r_tsptr
= tcg_temp_new_ptr();
3780 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3781 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3782 offsetof(trap_state
, tpc
));
3783 tcg_temp_free_ptr(r_tsptr
);
3790 r_tsptr
= tcg_temp_new_ptr();
3791 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3792 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3793 offsetof(trap_state
, tnpc
));
3794 tcg_temp_free_ptr(r_tsptr
);
3801 r_tsptr
= tcg_temp_new_ptr();
3802 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3803 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3804 offsetof(trap_state
,
3806 tcg_temp_free_ptr(r_tsptr
);
3813 r_tsptr
= tcg_temp_new_ptr();
3814 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3815 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
3816 offsetof(trap_state
, tt
));
3817 tcg_temp_free_ptr(r_tsptr
);
3824 r_tickptr
= tcg_temp_new_ptr();
3825 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3826 offsetof(CPUSPARCState
, tick
));
3827 gen_helper_tick_set_count(r_tickptr
,
3829 tcg_temp_free_ptr(r_tickptr
);
3833 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
3837 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
3838 dc
->npc
= DYNAMIC_PC
;
3842 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3843 offsetof(CPUSPARCState
, tl
));
3844 dc
->npc
= DYNAMIC_PC
;
3847 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
3850 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
3853 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3854 offsetof(CPUSPARCState
,
3857 case 11: // canrestore
3858 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3859 offsetof(CPUSPARCState
,
3862 case 12: // cleanwin
3863 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3864 offsetof(CPUSPARCState
,
3867 case 13: // otherwin
3868 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3869 offsetof(CPUSPARCState
,
3873 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3874 offsetof(CPUSPARCState
,
3877 case 16: // UA2005 gl
3878 CHECK_IU_FEATURE(dc
, GL
);
3879 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3880 offsetof(CPUSPARCState
, gl
));
3882 case 26: // UA2005 strand status
3883 CHECK_IU_FEATURE(dc
, HYPV
);
3884 if (!hypervisor(dc
))
3886 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
3892 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
3893 if (dc
->def
->nwindows
!= 32) {
3894 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
3895 (1 << dc
->def
->nwindows
) - 1);
3900 case 0x33: /* wrtbr, UA2005 wrhpr */
3902 #ifndef TARGET_SPARC64
3903 if (!supervisor(dc
))
3905 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
3907 CHECK_IU_FEATURE(dc
, HYPV
);
3908 if (!hypervisor(dc
))
3910 cpu_tmp0
= get_temp_tl(dc
);
3911 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3914 // XXX gen_op_wrhpstate();
3921 // XXX gen_op_wrhtstate();
3924 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
3927 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
3929 case 31: // hstick_cmpr
3933 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
3934 r_tickptr
= tcg_temp_new_ptr();
3935 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3936 offsetof(CPUSPARCState
, hstick
));
3937 gen_helper_tick_set_limit(r_tickptr
,
3939 tcg_temp_free_ptr(r_tickptr
);
3942 case 6: // hver readonly
3950 #ifdef TARGET_SPARC64
3951 case 0x2c: /* V9 movcc */
3953 int cc
= GET_FIELD_SP(insn
, 11, 12);
3954 int cond
= GET_FIELD_SP(insn
, 14, 17);
3958 if (insn
& (1 << 18)) {
3960 gen_compare(&cmp
, 0, cond
, dc
);
3961 } else if (cc
== 2) {
3962 gen_compare(&cmp
, 1, cond
, dc
);
3967 gen_fcompare(&cmp
, cc
, cond
);
3970 /* The get_src2 above loaded the normal 13-bit
3971 immediate field, not the 11-bit field we have
3972 in movcc. But it did handle the reg case. */
3974 simm
= GET_FIELD_SPs(insn
, 0, 10);
3975 tcg_gen_movi_tl(cpu_src2
, simm
);
3978 dst
= gen_load_gpr(dc
, rd
);
3979 tcg_gen_movcond_tl(cmp
.cond
, dst
,
3983 gen_store_gpr(dc
, rd
, dst
);
3986 case 0x2d: /* V9 sdivx */
3987 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3988 gen_store_gpr(dc
, rd
, cpu_dst
);
3990 case 0x2e: /* V9 popc */
3991 gen_helper_popc(cpu_dst
, cpu_src2
);
3992 gen_store_gpr(dc
, rd
, cpu_dst
);
3994 case 0x2f: /* V9 movr */
3996 int cond
= GET_FIELD_SP(insn
, 10, 12);
4000 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4002 /* The get_src2 above loaded the normal 13-bit
4003 immediate field, not the 10-bit field we have
4004 in movr. But it did handle the reg case. */
4006 simm
= GET_FIELD_SPs(insn
, 0, 9);
4007 tcg_gen_movi_tl(cpu_src2
, simm
);
4010 dst
= gen_load_gpr(dc
, rd
);
4011 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4015 gen_store_gpr(dc
, rd
, dst
);
4023 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4024 #ifdef TARGET_SPARC64
4025 int opf
= GET_FIELD_SP(insn
, 5, 13);
4026 rs1
= GET_FIELD(insn
, 13, 17);
4027 rs2
= GET_FIELD(insn
, 27, 31);
4028 if (gen_trap_ifnofpu(dc
)) {
4033 case 0x000: /* VIS I edge8cc */
4034 CHECK_FPU_FEATURE(dc
, VIS1
);
4035 cpu_src1
= gen_load_gpr(dc
, rs1
);
4036 cpu_src2
= gen_load_gpr(dc
, rs2
);
4037 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4038 gen_store_gpr(dc
, rd
, cpu_dst
);
4040 case 0x001: /* VIS II edge8n */
4041 CHECK_FPU_FEATURE(dc
, VIS2
);
4042 cpu_src1
= gen_load_gpr(dc
, rs1
);
4043 cpu_src2
= gen_load_gpr(dc
, rs2
);
4044 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4045 gen_store_gpr(dc
, rd
, cpu_dst
);
4047 case 0x002: /* VIS I edge8lcc */
4048 CHECK_FPU_FEATURE(dc
, VIS1
);
4049 cpu_src1
= gen_load_gpr(dc
, rs1
);
4050 cpu_src2
= gen_load_gpr(dc
, rs2
);
4051 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4052 gen_store_gpr(dc
, rd
, cpu_dst
);
4054 case 0x003: /* VIS II edge8ln */
4055 CHECK_FPU_FEATURE(dc
, VIS2
);
4056 cpu_src1
= gen_load_gpr(dc
, rs1
);
4057 cpu_src2
= gen_load_gpr(dc
, rs2
);
4058 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4059 gen_store_gpr(dc
, rd
, cpu_dst
);
4061 case 0x004: /* VIS I edge16cc */
4062 CHECK_FPU_FEATURE(dc
, VIS1
);
4063 cpu_src1
= gen_load_gpr(dc
, rs1
);
4064 cpu_src2
= gen_load_gpr(dc
, rs2
);
4065 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4066 gen_store_gpr(dc
, rd
, cpu_dst
);
4068 case 0x005: /* VIS II edge16n */
4069 CHECK_FPU_FEATURE(dc
, VIS2
);
4070 cpu_src1
= gen_load_gpr(dc
, rs1
);
4071 cpu_src2
= gen_load_gpr(dc
, rs2
);
4072 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4073 gen_store_gpr(dc
, rd
, cpu_dst
);
4075 case 0x006: /* VIS I edge16lcc */
4076 CHECK_FPU_FEATURE(dc
, VIS1
);
4077 cpu_src1
= gen_load_gpr(dc
, rs1
);
4078 cpu_src2
= gen_load_gpr(dc
, rs2
);
4079 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4080 gen_store_gpr(dc
, rd
, cpu_dst
);
4082 case 0x007: /* VIS II edge16ln */
4083 CHECK_FPU_FEATURE(dc
, VIS2
);
4084 cpu_src1
= gen_load_gpr(dc
, rs1
);
4085 cpu_src2
= gen_load_gpr(dc
, rs2
);
4086 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4087 gen_store_gpr(dc
, rd
, cpu_dst
);
4089 case 0x008: /* VIS I edge32cc */
4090 CHECK_FPU_FEATURE(dc
, VIS1
);
4091 cpu_src1
= gen_load_gpr(dc
, rs1
);
4092 cpu_src2
= gen_load_gpr(dc
, rs2
);
4093 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4094 gen_store_gpr(dc
, rd
, cpu_dst
);
4096 case 0x009: /* VIS II edge32n */
4097 CHECK_FPU_FEATURE(dc
, VIS2
);
4098 cpu_src1
= gen_load_gpr(dc
, rs1
);
4099 cpu_src2
= gen_load_gpr(dc
, rs2
);
4100 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4101 gen_store_gpr(dc
, rd
, cpu_dst
);
4103 case 0x00a: /* VIS I edge32lcc */
4104 CHECK_FPU_FEATURE(dc
, VIS1
);
4105 cpu_src1
= gen_load_gpr(dc
, rs1
);
4106 cpu_src2
= gen_load_gpr(dc
, rs2
);
4107 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4108 gen_store_gpr(dc
, rd
, cpu_dst
);
4110 case 0x00b: /* VIS II edge32ln */
4111 CHECK_FPU_FEATURE(dc
, VIS2
);
4112 cpu_src1
= gen_load_gpr(dc
, rs1
);
4113 cpu_src2
= gen_load_gpr(dc
, rs2
);
4114 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4115 gen_store_gpr(dc
, rd
, cpu_dst
);
4117 case 0x010: /* VIS I array8 */
4118 CHECK_FPU_FEATURE(dc
, VIS1
);
4119 cpu_src1
= gen_load_gpr(dc
, rs1
);
4120 cpu_src2
= gen_load_gpr(dc
, rs2
);
4121 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4122 gen_store_gpr(dc
, rd
, cpu_dst
);
4124 case 0x012: /* VIS I array16 */
4125 CHECK_FPU_FEATURE(dc
, VIS1
);
4126 cpu_src1
= gen_load_gpr(dc
, rs1
);
4127 cpu_src2
= gen_load_gpr(dc
, rs2
);
4128 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4129 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4130 gen_store_gpr(dc
, rd
, cpu_dst
);
4132 case 0x014: /* VIS I array32 */
4133 CHECK_FPU_FEATURE(dc
, VIS1
);
4134 cpu_src1
= gen_load_gpr(dc
, rs1
);
4135 cpu_src2
= gen_load_gpr(dc
, rs2
);
4136 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4137 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4138 gen_store_gpr(dc
, rd
, cpu_dst
);
4140 case 0x018: /* VIS I alignaddr */
4141 CHECK_FPU_FEATURE(dc
, VIS1
);
4142 cpu_src1
= gen_load_gpr(dc
, rs1
);
4143 cpu_src2
= gen_load_gpr(dc
, rs2
);
4144 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4145 gen_store_gpr(dc
, rd
, cpu_dst
);
4147 case 0x01a: /* VIS I alignaddrl */
4148 CHECK_FPU_FEATURE(dc
, VIS1
);
4149 cpu_src1
= gen_load_gpr(dc
, rs1
);
4150 cpu_src2
= gen_load_gpr(dc
, rs2
);
4151 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4152 gen_store_gpr(dc
, rd
, cpu_dst
);
4154 case 0x019: /* VIS II bmask */
4155 CHECK_FPU_FEATURE(dc
, VIS2
);
4156 cpu_src1
= gen_load_gpr(dc
, rs1
);
4157 cpu_src2
= gen_load_gpr(dc
, rs2
);
4158 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4159 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4160 gen_store_gpr(dc
, rd
, cpu_dst
);
4162 case 0x020: /* VIS I fcmple16 */
4163 CHECK_FPU_FEATURE(dc
, VIS1
);
4164 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4165 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4166 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4167 gen_store_gpr(dc
, rd
, cpu_dst
);
4169 case 0x022: /* VIS I fcmpne16 */
4170 CHECK_FPU_FEATURE(dc
, VIS1
);
4171 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4172 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4173 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4174 gen_store_gpr(dc
, rd
, cpu_dst
);
4176 case 0x024: /* VIS I fcmple32 */
4177 CHECK_FPU_FEATURE(dc
, VIS1
);
4178 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4179 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4180 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4181 gen_store_gpr(dc
, rd
, cpu_dst
);
4183 case 0x026: /* VIS I fcmpne32 */
4184 CHECK_FPU_FEATURE(dc
, VIS1
);
4185 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4186 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4187 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4188 gen_store_gpr(dc
, rd
, cpu_dst
);
4190 case 0x028: /* VIS I fcmpgt16 */
4191 CHECK_FPU_FEATURE(dc
, VIS1
);
4192 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4193 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4194 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4195 gen_store_gpr(dc
, rd
, cpu_dst
);
4197 case 0x02a: /* VIS I fcmpeq16 */
4198 CHECK_FPU_FEATURE(dc
, VIS1
);
4199 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4200 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4201 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4202 gen_store_gpr(dc
, rd
, cpu_dst
);
4204 case 0x02c: /* VIS I fcmpgt32 */
4205 CHECK_FPU_FEATURE(dc
, VIS1
);
4206 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4207 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4208 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4209 gen_store_gpr(dc
, rd
, cpu_dst
);
4211 case 0x02e: /* VIS I fcmpeq32 */
4212 CHECK_FPU_FEATURE(dc
, VIS1
);
4213 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4214 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4215 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4216 gen_store_gpr(dc
, rd
, cpu_dst
);
4218 case 0x031: /* VIS I fmul8x16 */
4219 CHECK_FPU_FEATURE(dc
, VIS1
);
4220 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4222 case 0x033: /* VIS I fmul8x16au */
4223 CHECK_FPU_FEATURE(dc
, VIS1
);
4224 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4226 case 0x035: /* VIS I fmul8x16al */
4227 CHECK_FPU_FEATURE(dc
, VIS1
);
4228 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4230 case 0x036: /* VIS I fmul8sux16 */
4231 CHECK_FPU_FEATURE(dc
, VIS1
);
4232 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4234 case 0x037: /* VIS I fmul8ulx16 */
4235 CHECK_FPU_FEATURE(dc
, VIS1
);
4236 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4238 case 0x038: /* VIS I fmuld8sux16 */
4239 CHECK_FPU_FEATURE(dc
, VIS1
);
4240 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4242 case 0x039: /* VIS I fmuld8ulx16 */
4243 CHECK_FPU_FEATURE(dc
, VIS1
);
4244 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4246 case 0x03a: /* VIS I fpack32 */
4247 CHECK_FPU_FEATURE(dc
, VIS1
);
4248 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4250 case 0x03b: /* VIS I fpack16 */
4251 CHECK_FPU_FEATURE(dc
, VIS1
);
4252 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4253 cpu_dst_32
= gen_dest_fpr_F(dc
);
4254 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4255 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4257 case 0x03d: /* VIS I fpackfix */
4258 CHECK_FPU_FEATURE(dc
, VIS1
);
4259 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4260 cpu_dst_32
= gen_dest_fpr_F(dc
);
4261 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4262 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4264 case 0x03e: /* VIS I pdist */
4265 CHECK_FPU_FEATURE(dc
, VIS1
);
4266 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4268 case 0x048: /* VIS I faligndata */
4269 CHECK_FPU_FEATURE(dc
, VIS1
);
4270 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4272 case 0x04b: /* VIS I fpmerge */
4273 CHECK_FPU_FEATURE(dc
, VIS1
);
4274 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4276 case 0x04c: /* VIS II bshuffle */
4277 CHECK_FPU_FEATURE(dc
, VIS2
);
4278 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4280 case 0x04d: /* VIS I fexpand */
4281 CHECK_FPU_FEATURE(dc
, VIS1
);
4282 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4284 case 0x050: /* VIS I fpadd16 */
4285 CHECK_FPU_FEATURE(dc
, VIS1
);
4286 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4288 case 0x051: /* VIS I fpadd16s */
4289 CHECK_FPU_FEATURE(dc
, VIS1
);
4290 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4292 case 0x052: /* VIS I fpadd32 */
4293 CHECK_FPU_FEATURE(dc
, VIS1
);
4294 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4296 case 0x053: /* VIS I fpadd32s */
4297 CHECK_FPU_FEATURE(dc
, VIS1
);
4298 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4300 case 0x054: /* VIS I fpsub16 */
4301 CHECK_FPU_FEATURE(dc
, VIS1
);
4302 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4304 case 0x055: /* VIS I fpsub16s */
4305 CHECK_FPU_FEATURE(dc
, VIS1
);
4306 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4308 case 0x056: /* VIS I fpsub32 */
4309 CHECK_FPU_FEATURE(dc
, VIS1
);
4310 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4312 case 0x057: /* VIS I fpsub32s */
4313 CHECK_FPU_FEATURE(dc
, VIS1
);
4314 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4316 case 0x060: /* VIS I fzero */
4317 CHECK_FPU_FEATURE(dc
, VIS1
);
4318 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4319 tcg_gen_movi_i64(cpu_dst_64
, 0);
4320 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4322 case 0x061: /* VIS I fzeros */
4323 CHECK_FPU_FEATURE(dc
, VIS1
);
4324 cpu_dst_32
= gen_dest_fpr_F(dc
);
4325 tcg_gen_movi_i32(cpu_dst_32
, 0);
4326 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4328 case 0x062: /* VIS I fnor */
4329 CHECK_FPU_FEATURE(dc
, VIS1
);
4330 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4332 case 0x063: /* VIS I fnors */
4333 CHECK_FPU_FEATURE(dc
, VIS1
);
4334 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4336 case 0x064: /* VIS I fandnot2 */
4337 CHECK_FPU_FEATURE(dc
, VIS1
);
4338 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4340 case 0x065: /* VIS I fandnot2s */
4341 CHECK_FPU_FEATURE(dc
, VIS1
);
4342 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4344 case 0x066: /* VIS I fnot2 */
4345 CHECK_FPU_FEATURE(dc
, VIS1
);
4346 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4348 case 0x067: /* VIS I fnot2s */
4349 CHECK_FPU_FEATURE(dc
, VIS1
);
4350 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4352 case 0x068: /* VIS I fandnot1 */
4353 CHECK_FPU_FEATURE(dc
, VIS1
);
4354 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4356 case 0x069: /* VIS I fandnot1s */
4357 CHECK_FPU_FEATURE(dc
, VIS1
);
4358 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4360 case 0x06a: /* VIS I fnot1 */
4361 CHECK_FPU_FEATURE(dc
, VIS1
);
4362 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4364 case 0x06b: /* VIS I fnot1s */
4365 CHECK_FPU_FEATURE(dc
, VIS1
);
4366 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4368 case 0x06c: /* VIS I fxor */
4369 CHECK_FPU_FEATURE(dc
, VIS1
);
4370 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4372 case 0x06d: /* VIS I fxors */
4373 CHECK_FPU_FEATURE(dc
, VIS1
);
4374 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4376 case 0x06e: /* VIS I fnand */
4377 CHECK_FPU_FEATURE(dc
, VIS1
);
4378 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4380 case 0x06f: /* VIS I fnands */
4381 CHECK_FPU_FEATURE(dc
, VIS1
);
4382 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4384 case 0x070: /* VIS I fand */
4385 CHECK_FPU_FEATURE(dc
, VIS1
);
4386 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4388 case 0x071: /* VIS I fands */
4389 CHECK_FPU_FEATURE(dc
, VIS1
);
4390 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4392 case 0x072: /* VIS I fxnor */
4393 CHECK_FPU_FEATURE(dc
, VIS1
);
4394 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4396 case 0x073: /* VIS I fxnors */
4397 CHECK_FPU_FEATURE(dc
, VIS1
);
4398 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4400 case 0x074: /* VIS I fsrc1 */
4401 CHECK_FPU_FEATURE(dc
, VIS1
);
4402 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4403 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4405 case 0x075: /* VIS I fsrc1s */
4406 CHECK_FPU_FEATURE(dc
, VIS1
);
4407 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4408 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4410 case 0x076: /* VIS I fornot2 */
4411 CHECK_FPU_FEATURE(dc
, VIS1
);
4412 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4414 case 0x077: /* VIS I fornot2s */
4415 CHECK_FPU_FEATURE(dc
, VIS1
);
4416 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4418 case 0x078: /* VIS I fsrc2 */
4419 CHECK_FPU_FEATURE(dc
, VIS1
);
4420 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4421 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4423 case 0x079: /* VIS I fsrc2s */
4424 CHECK_FPU_FEATURE(dc
, VIS1
);
4425 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4426 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4428 case 0x07a: /* VIS I fornot1 */
4429 CHECK_FPU_FEATURE(dc
, VIS1
);
4430 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4432 case 0x07b: /* VIS I fornot1s */
4433 CHECK_FPU_FEATURE(dc
, VIS1
);
4434 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4436 case 0x07c: /* VIS I for */
4437 CHECK_FPU_FEATURE(dc
, VIS1
);
4438 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4440 case 0x07d: /* VIS I fors */
4441 CHECK_FPU_FEATURE(dc
, VIS1
);
4442 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4444 case 0x07e: /* VIS I fone */
4445 CHECK_FPU_FEATURE(dc
, VIS1
);
4446 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4447 tcg_gen_movi_i64(cpu_dst_64
, -1);
4448 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4450 case 0x07f: /* VIS I fones */
4451 CHECK_FPU_FEATURE(dc
, VIS1
);
4452 cpu_dst_32
= gen_dest_fpr_F(dc
);
4453 tcg_gen_movi_i32(cpu_dst_32
, -1);
4454 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4456 case 0x080: /* VIS I shutdown */
4457 case 0x081: /* VIS II siam */
4466 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
4467 #ifdef TARGET_SPARC64
4472 #ifdef TARGET_SPARC64
4473 } else if (xop
== 0x39) { /* V9 return */
4477 cpu_src1
= get_src1(dc
, insn
);
4478 cpu_tmp0
= get_temp_tl(dc
);
4479 if (IS_IMM
) { /* immediate */
4480 simm
= GET_FIELDs(insn
, 19, 31);
4481 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
4482 } else { /* register */
4483 rs2
= GET_FIELD(insn
, 27, 31);
4485 cpu_src2
= gen_load_gpr(dc
, rs2
);
4486 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4488 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
4491 gen_helper_restore(cpu_env
);
4493 r_const
= tcg_const_i32(3);
4494 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4495 tcg_temp_free_i32(r_const
);
4496 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4497 dc
->npc
= DYNAMIC_PC
;
4501 cpu_src1
= get_src1(dc
, insn
);
4502 cpu_tmp0
= get_temp_tl(dc
);
4503 if (IS_IMM
) { /* immediate */
4504 simm
= GET_FIELDs(insn
, 19, 31);
4505 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
4506 } else { /* register */
4507 rs2
= GET_FIELD(insn
, 27, 31);
4509 cpu_src2
= gen_load_gpr(dc
, rs2
);
4510 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4512 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
4516 case 0x38: /* jmpl */
4521 t
= gen_dest_gpr(dc
, rd
);
4522 tcg_gen_movi_tl(t
, dc
->pc
);
4523 gen_store_gpr(dc
, rd
, t
);
4525 r_const
= tcg_const_i32(3);
4526 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4527 tcg_temp_free_i32(r_const
);
4528 gen_address_mask(dc
, cpu_tmp0
);
4529 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4530 dc
->npc
= DYNAMIC_PC
;
4533 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4534 case 0x39: /* rett, V9 return */
4538 if (!supervisor(dc
))
4541 r_const
= tcg_const_i32(3);
4542 gen_helper_check_align(cpu_env
, cpu_tmp0
, r_const
);
4543 tcg_temp_free_i32(r_const
);
4544 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
4545 dc
->npc
= DYNAMIC_PC
;
4546 gen_helper_rett(cpu_env
);
4550 case 0x3b: /* flush */
4551 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
4555 case 0x3c: /* save */
4557 gen_helper_save(cpu_env
);
4558 gen_store_gpr(dc
, rd
, cpu_tmp0
);
4560 case 0x3d: /* restore */
4562 gen_helper_restore(cpu_env
);
4563 gen_store_gpr(dc
, rd
, cpu_tmp0
);
4565 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4566 case 0x3e: /* V9 done/retry */
4570 if (!supervisor(dc
))
4572 dc
->npc
= DYNAMIC_PC
;
4573 dc
->pc
= DYNAMIC_PC
;
4574 gen_helper_done(cpu_env
);
4577 if (!supervisor(dc
))
4579 dc
->npc
= DYNAMIC_PC
;
4580 dc
->pc
= DYNAMIC_PC
;
4581 gen_helper_retry(cpu_env
);
4596 case 3: /* load/store instructions */
4598 unsigned int xop
= GET_FIELD(insn
, 7, 12);
4599 /* ??? gen_address_mask prevents us from using a source
4600 register directly. Always generate a temporary. */
4601 TCGv cpu_addr
= get_temp_tl(dc
);
4603 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
4604 if (xop
== 0x3c || xop
== 0x3e) {
4605 /* V9 casa/casxa : no offset */
4606 } else if (IS_IMM
) { /* immediate */
4607 simm
= GET_FIELDs(insn
, 19, 31);
4609 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
4611 } else { /* register */
4612 rs2
= GET_FIELD(insn
, 27, 31);
4614 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
4617 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
4618 (xop
> 0x17 && xop
<= 0x1d ) ||
4619 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
4620 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
4623 case 0x0: /* ld, V9 lduw, load unsigned word */
4624 gen_address_mask(dc
, cpu_addr
);
4625 tcg_gen_qemu_ld32u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4627 case 0x1: /* ldub, load unsigned byte */
4628 gen_address_mask(dc
, cpu_addr
);
4629 tcg_gen_qemu_ld8u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4631 case 0x2: /* lduh, load unsigned halfword */
4632 gen_address_mask(dc
, cpu_addr
);
4633 tcg_gen_qemu_ld16u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4635 case 0x3: /* ldd, load double word */
4643 r_const
= tcg_const_i32(7);
4644 /* XXX remove alignment check */
4645 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4646 tcg_temp_free_i32(r_const
);
4647 gen_address_mask(dc
, cpu_addr
);
4648 t64
= tcg_temp_new_i64();
4649 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4650 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4651 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
4652 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
4653 tcg_gen_shri_i64(t64
, t64
, 32);
4654 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4655 tcg_temp_free_i64(t64
);
4656 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
4659 case 0x9: /* ldsb, load signed byte */
4660 gen_address_mask(dc
, cpu_addr
);
4661 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4663 case 0xa: /* ldsh, load signed halfword */
4664 gen_address_mask(dc
, cpu_addr
);
4665 tcg_gen_qemu_ld16s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4667 case 0xd: /* ldstub -- XXX: should be atomically */
4671 gen_address_mask(dc
, cpu_addr
);
4672 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4673 r_const
= tcg_const_tl(0xff);
4674 tcg_gen_qemu_st8(r_const
, cpu_addr
, dc
->mem_idx
);
4675 tcg_temp_free(r_const
);
4679 /* swap, swap register with memory. Also atomically */
4681 TCGv t0
= get_temp_tl(dc
);
4682 CHECK_IU_FEATURE(dc
, SWAP
);
4683 cpu_src1
= gen_load_gpr(dc
, rd
);
4684 gen_address_mask(dc
, cpu_addr
);
4685 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4686 tcg_gen_qemu_st32(cpu_src1
, cpu_addr
, dc
->mem_idx
);
4687 tcg_gen_mov_tl(cpu_val
, t0
);
4690 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4691 case 0x10: /* lda, V9 lduwa, load word alternate */
4692 #ifndef TARGET_SPARC64
4695 if (!supervisor(dc
))
4699 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 0);
4701 case 0x11: /* lduba, load unsigned byte alternate */
4702 #ifndef TARGET_SPARC64
4705 if (!supervisor(dc
))
4709 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 0);
4711 case 0x12: /* lduha, load unsigned halfword alternate */
4712 #ifndef TARGET_SPARC64
4715 if (!supervisor(dc
))
4719 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 0);
4721 case 0x13: /* ldda, load double word alternate */
4722 #ifndef TARGET_SPARC64
4725 if (!supervisor(dc
))
4731 gen_ldda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4733 case 0x19: /* ldsba, load signed byte alternate */
4734 #ifndef TARGET_SPARC64
4737 if (!supervisor(dc
))
4741 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 1);
4743 case 0x1a: /* ldsha, load signed halfword alternate */
4744 #ifndef TARGET_SPARC64
4747 if (!supervisor(dc
))
4751 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 1);
4753 case 0x1d: /* ldstuba -- XXX: should be atomically */
4754 #ifndef TARGET_SPARC64
4757 if (!supervisor(dc
))
4761 gen_ldstub_asi(cpu_val
, cpu_addr
, insn
);
4763 case 0x1f: /* swapa, swap reg with alt. memory. Also
4765 CHECK_IU_FEATURE(dc
, SWAP
);
4766 #ifndef TARGET_SPARC64
4769 if (!supervisor(dc
))
4773 cpu_src1
= gen_load_gpr(dc
, rd
);
4774 gen_swap_asi(cpu_val
, cpu_src1
, cpu_addr
, insn
);
4777 #ifndef TARGET_SPARC64
4778 case 0x30: /* ldc */
4779 case 0x31: /* ldcsr */
4780 case 0x33: /* lddc */
4784 #ifdef TARGET_SPARC64
4785 case 0x08: /* V9 ldsw */
4786 gen_address_mask(dc
, cpu_addr
);
4787 tcg_gen_qemu_ld32s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4789 case 0x0b: /* V9 ldx */
4790 gen_address_mask(dc
, cpu_addr
);
4791 tcg_gen_qemu_ld64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4793 case 0x18: /* V9 ldswa */
4795 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 1);
4797 case 0x1b: /* V9 ldxa */
4799 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 8, 0);
4801 case 0x2d: /* V9 prefetch, no effect */
4803 case 0x30: /* V9 ldfa */
4804 if (gen_trap_ifnofpu(dc
)) {
4808 gen_ldf_asi(cpu_addr
, insn
, 4, rd
);
4809 gen_update_fprs_dirty(rd
);
4811 case 0x33: /* V9 lddfa */
4812 if (gen_trap_ifnofpu(dc
)) {
4816 gen_ldf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
4817 gen_update_fprs_dirty(DFPREG(rd
));
4819 case 0x3d: /* V9 prefetcha, no effect */
4821 case 0x32: /* V9 ldqfa */
4822 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4823 if (gen_trap_ifnofpu(dc
)) {
4827 gen_ldf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
4828 gen_update_fprs_dirty(QFPREG(rd
));
4834 gen_store_gpr(dc
, rd
, cpu_val
);
4835 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4838 } else if (xop
>= 0x20 && xop
< 0x24) {
4841 if (gen_trap_ifnofpu(dc
)) {
4846 case 0x20: /* ldf, load fpreg */
4847 gen_address_mask(dc
, cpu_addr
);
4848 t0
= get_temp_tl(dc
);
4849 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4850 cpu_dst_32
= gen_dest_fpr_F(dc
);
4851 tcg_gen_trunc_tl_i32(cpu_dst_32
, t0
);
4852 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4854 case 0x21: /* ldfsr, V9 ldxfsr */
4855 #ifdef TARGET_SPARC64
4856 gen_address_mask(dc
, cpu_addr
);
4858 TCGv_i64 t64
= tcg_temp_new_i64();
4859 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4860 gen_helper_ldxfsr(cpu_env
, t64
);
4861 tcg_temp_free_i64(t64
);
4865 cpu_dst_32
= get_temp_i32(dc
);
4866 t0
= get_temp_tl(dc
);
4867 tcg_gen_qemu_ld32u(t0
, cpu_addr
, dc
->mem_idx
);
4868 tcg_gen_trunc_tl_i32(cpu_dst_32
, t0
);
4869 gen_helper_ldfsr(cpu_env
, cpu_dst_32
);
4871 case 0x22: /* ldqf, load quad fpreg */
4875 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4876 r_const
= tcg_const_i32(dc
->mem_idx
);
4877 gen_address_mask(dc
, cpu_addr
);
4878 gen_helper_ldqf(cpu_env
, cpu_addr
, r_const
);
4879 tcg_temp_free_i32(r_const
);
4880 gen_op_store_QT0_fpr(QFPREG(rd
));
4881 gen_update_fprs_dirty(QFPREG(rd
));
4884 case 0x23: /* lddf, load double fpreg */
4885 gen_address_mask(dc
, cpu_addr
);
4886 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4887 tcg_gen_qemu_ld64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
);
4888 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4893 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4894 xop
== 0xe || xop
== 0x1e) {
4895 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
4898 case 0x4: /* st, store word */
4899 gen_address_mask(dc
, cpu_addr
);
4900 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4902 case 0x5: /* stb, store byte */
4903 gen_address_mask(dc
, cpu_addr
);
4904 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4906 case 0x6: /* sth, store halfword */
4907 gen_address_mask(dc
, cpu_addr
);
4908 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4910 case 0x7: /* std, store double word */
4919 gen_address_mask(dc
, cpu_addr
);
4920 r_const
= tcg_const_i32(7);
4921 /* XXX remove alignment check */
4922 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4923 tcg_temp_free_i32(r_const
);
4924 lo
= gen_load_gpr(dc
, rd
+ 1);
4926 t64
= tcg_temp_new_i64();
4927 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
4928 tcg_gen_qemu_st64(t64
, cpu_addr
, dc
->mem_idx
);
4929 tcg_temp_free_i64(t64
);
4932 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4933 case 0x14: /* sta, V9 stwa, store word alternate */
4934 #ifndef TARGET_SPARC64
4937 if (!supervisor(dc
))
4941 gen_st_asi(cpu_val
, cpu_addr
, insn
, 4);
4942 dc
->npc
= DYNAMIC_PC
;
4944 case 0x15: /* stba, store byte alternate */
4945 #ifndef TARGET_SPARC64
4948 if (!supervisor(dc
))
4952 gen_st_asi(cpu_val
, cpu_addr
, insn
, 1);
4953 dc
->npc
= DYNAMIC_PC
;
4955 case 0x16: /* stha, store halfword alternate */
4956 #ifndef TARGET_SPARC64
4959 if (!supervisor(dc
))
4963 gen_st_asi(cpu_val
, cpu_addr
, insn
, 2);
4964 dc
->npc
= DYNAMIC_PC
;
4966 case 0x17: /* stda, store double word alternate */
4967 #ifndef TARGET_SPARC64
4970 if (!supervisor(dc
))
4977 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4981 #ifdef TARGET_SPARC64
4982 case 0x0e: /* V9 stx */
4983 gen_address_mask(dc
, cpu_addr
);
4984 tcg_gen_qemu_st64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4986 case 0x1e: /* V9 stxa */
4988 gen_st_asi(cpu_val
, cpu_addr
, insn
, 8);
4989 dc
->npc
= DYNAMIC_PC
;
4995 } else if (xop
> 0x23 && xop
< 0x28) {
4996 if (gen_trap_ifnofpu(dc
)) {
5001 case 0x24: /* stf, store fpreg */
5003 TCGv t
= get_temp_tl(dc
);
5004 gen_address_mask(dc
, cpu_addr
);
5005 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5006 tcg_gen_ext_i32_tl(t
, cpu_src1_32
);
5007 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5010 case 0x25: /* stfsr, V9 stxfsr */
5012 TCGv t
= get_temp_tl(dc
);
5014 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUSPARCState
, fsr
));
5015 #ifdef TARGET_SPARC64
5016 gen_address_mask(dc
, cpu_addr
);
5018 tcg_gen_qemu_st64(t
, cpu_addr
, dc
->mem_idx
);
5022 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5026 #ifdef TARGET_SPARC64
5027 /* V9 stqf, store quad fpreg */
5031 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5032 gen_op_load_fpr_QT0(QFPREG(rd
));
5033 r_const
= tcg_const_i32(dc
->mem_idx
);
5034 gen_address_mask(dc
, cpu_addr
);
5035 gen_helper_stqf(cpu_env
, cpu_addr
, r_const
);
5036 tcg_temp_free_i32(r_const
);
5039 #else /* !TARGET_SPARC64 */
5040 /* stdfq, store floating point queue */
5041 #if defined(CONFIG_USER_ONLY)
5044 if (!supervisor(dc
))
5046 if (gen_trap_ifnofpu(dc
)) {
5052 case 0x27: /* stdf, store double fpreg */
5053 gen_address_mask(dc
, cpu_addr
);
5054 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5055 tcg_gen_qemu_st64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
);
5060 } else if (xop
> 0x33 && xop
< 0x3f) {
5063 #ifdef TARGET_SPARC64
5064 case 0x34: /* V9 stfa */
5065 if (gen_trap_ifnofpu(dc
)) {
5068 gen_stf_asi(cpu_addr
, insn
, 4, rd
);
5070 case 0x36: /* V9 stqfa */
5074 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5075 if (gen_trap_ifnofpu(dc
)) {
5078 r_const
= tcg_const_i32(7);
5079 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
5080 tcg_temp_free_i32(r_const
);
5081 gen_stf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
5084 case 0x37: /* V9 stdfa */
5085 if (gen_trap_ifnofpu(dc
)) {
5088 gen_stf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
5090 case 0x3e: /* V9 casxa */
5091 rs2
= GET_FIELD(insn
, 27, 31);
5092 cpu_src2
= gen_load_gpr(dc
, rs2
);
5093 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5096 case 0x34: /* stc */
5097 case 0x35: /* stcsr */
5098 case 0x36: /* stdcq */
5099 case 0x37: /* stdc */
5102 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5103 case 0x3c: /* V9 or LEON3 casa */
5104 #ifndef TARGET_SPARC64
5105 CHECK_IU_FEATURE(dc
, CASA
);
5109 /* LEON3 allows CASA from user space with ASI 0xa */
5110 if ((GET_FIELD(insn
, 19, 26) != 0xa) && !supervisor(dc
)) {
5114 rs2
= GET_FIELD(insn
, 27, 31);
5115 cpu_src2
= gen_load_gpr(dc
, rs2
);
5116 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5128 /* default case for non jump instructions */
5129 if (dc
->npc
== DYNAMIC_PC
) {
5130 dc
->pc
= DYNAMIC_PC
;
5132 } else if (dc
->npc
== JUMP_PC
) {
5133 /* we can do a static jump */
5134 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
5138 dc
->npc
= dc
->npc
+ 4;
5147 r_const
= tcg_const_i32(TT_ILL_INSN
);
5148 gen_helper_raise_exception(cpu_env
, r_const
);
5149 tcg_temp_free_i32(r_const
);
5158 r_const
= tcg_const_i32(TT_UNIMP_FLUSH
);
5159 gen_helper_raise_exception(cpu_env
, r_const
);
5160 tcg_temp_free_i32(r_const
);
5164 #if !defined(CONFIG_USER_ONLY)
5170 r_const
= tcg_const_i32(TT_PRIV_INSN
);
5171 gen_helper_raise_exception(cpu_env
, r_const
);
5172 tcg_temp_free_i32(r_const
);
5179 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP
);
5182 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5185 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR
);
5189 #ifndef TARGET_SPARC64
5195 r_const
= tcg_const_i32(TT_NCP_INSN
);
5196 gen_helper_raise_exception(cpu_env
, r_const
);
5197 tcg_temp_free(r_const
);
5203 if (dc
->n_t32
!= 0) {
5205 for (i
= dc
->n_t32
- 1; i
>= 0; --i
) {
5206 tcg_temp_free_i32(dc
->t32
[i
]);
5210 if (dc
->n_ttl
!= 0) {
5212 for (i
= dc
->n_ttl
- 1; i
>= 0; --i
) {
5213 tcg_temp_free(dc
->ttl
[i
]);
5219 void gen_intermediate_code(CPUSPARCState
* env
, TranslationBlock
* tb
)
5221 SPARCCPU
*cpu
= sparc_env_get_cpu(env
);
5222 CPUState
*cs
= CPU(cpu
);
5223 target_ulong pc_start
, last_pc
;
5224 DisasContext dc1
, *dc
= &dc1
;
5229 memset(dc
, 0, sizeof(DisasContext
));
5234 dc
->npc
= (target_ulong
) tb
->cs_base
;
5235 dc
->cc_op
= CC_OP_DYNAMIC
;
5236 dc
->mem_idx
= cpu_mmu_index(env
, false);
5238 dc
->fpu_enabled
= tb_fpu_enabled(tb
->flags
);
5239 dc
->address_mask_32bit
= tb_am_enabled(tb
->flags
);
5240 dc
->singlestep
= (cs
->singlestep_enabled
|| singlestep
);
5243 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5244 if (max_insns
== 0) {
5245 max_insns
= CF_COUNT_MASK
;
5247 if (max_insns
> TCG_MAX_INSNS
) {
5248 max_insns
= TCG_MAX_INSNS
;
5253 if (dc
->npc
& JUMP_PC
) {
5254 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
5255 tcg_gen_insn_start(dc
->pc
, dc
->jump_pc
[0] | JUMP_PC
);
5257 tcg_gen_insn_start(dc
->pc
, dc
->npc
);
5262 if (unlikely(cpu_breakpoint_test(cs
, dc
->pc
, BP_ANY
))) {
5263 if (dc
->pc
!= pc_start
) {
5266 gen_helper_debug(cpu_env
);
5272 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5276 insn
= cpu_ldl_code(env
, dc
->pc
);
5278 disas_sparc_insn(dc
, insn
);
5282 /* if the next PC is different, we abort now */
5283 if (dc
->pc
!= (last_pc
+ 4))
5285 /* if we reach a page boundary, we stop generation so that the
5286 PC of a TT_TFAULT exception is always in the right page */
5287 if ((dc
->pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
5289 /* if single step mode, we generate only one instruction and
5290 generate an exception */
5291 if (dc
->singlestep
) {
5294 } while (!tcg_op_buf_full() &&
5295 (dc
->pc
- pc_start
) < (TARGET_PAGE_SIZE
- 32) &&
5296 num_insns
< max_insns
);
5299 if (tb
->cflags
& CF_LAST_IO
) {
5303 if (dc
->pc
!= DYNAMIC_PC
&&
5304 (dc
->npc
!= DYNAMIC_PC
&& dc
->npc
!= JUMP_PC
)) {
5305 /* static PC and NPC: we can use direct chaining */
5306 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5308 if (dc
->pc
!= DYNAMIC_PC
) {
5309 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5315 gen_tb_end(tb
, num_insns
);
5317 tb
->size
= last_pc
+ 4 - pc_start
;
5318 tb
->icount
= num_insns
;
5321 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5322 qemu_log("--------------\n");
5323 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5324 log_target_disas(cs
, pc_start
, last_pc
+ 4 - pc_start
, 0);
5330 void gen_intermediate_code_init(CPUSPARCState
*env
)
5333 static const char gregnames
[32][4] = {
5334 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5335 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5336 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5337 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5339 static const char fregnames
[32][4] = {
5340 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5341 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5342 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5343 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5346 static const struct { TCGv_i32
*ptr
; int off
; const char *name
; } r32
[] = {
5347 #ifdef TARGET_SPARC64
5348 { &cpu_xcc
, offsetof(CPUSPARCState
, xcc
), "xcc" },
5349 { &cpu_asi
, offsetof(CPUSPARCState
, asi
), "asi" },
5350 { &cpu_fprs
, offsetof(CPUSPARCState
, fprs
), "fprs" },
5351 { &cpu_softint
, offsetof(CPUSPARCState
, softint
), "softint" },
5353 { &cpu_wim
, offsetof(CPUSPARCState
, wim
), "wim" },
5355 { &cpu_cc_op
, offsetof(CPUSPARCState
, cc_op
), "cc_op" },
5356 { &cpu_psr
, offsetof(CPUSPARCState
, psr
), "psr" },
5359 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5360 #ifdef TARGET_SPARC64
5361 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5362 { &cpu_tick_cmpr
, offsetof(CPUSPARCState
, tick_cmpr
), "tick_cmpr" },
5363 { &cpu_stick_cmpr
, offsetof(CPUSPARCState
, stick_cmpr
), "stick_cmpr" },
5364 { &cpu_hstick_cmpr
, offsetof(CPUSPARCState
, hstick_cmpr
),
5366 { &cpu_hintp
, offsetof(CPUSPARCState
, hintp
), "hintp" },
5367 { &cpu_htba
, offsetof(CPUSPARCState
, htba
), "htba" },
5368 { &cpu_hver
, offsetof(CPUSPARCState
, hver
), "hver" },
5369 { &cpu_ssr
, offsetof(CPUSPARCState
, ssr
), "ssr" },
5370 { &cpu_ver
, offsetof(CPUSPARCState
, version
), "ver" },
5372 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5373 { &cpu_cc_src
, offsetof(CPUSPARCState
, cc_src
), "cc_src" },
5374 { &cpu_cc_src2
, offsetof(CPUSPARCState
, cc_src2
), "cc_src2" },
5375 { &cpu_cc_dst
, offsetof(CPUSPARCState
, cc_dst
), "cc_dst" },
5376 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5377 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5378 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5379 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5380 #ifndef CONFIG_USER_ONLY
5381 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5387 /* init various static tables */
5393 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
5395 cpu_regwptr
= tcg_global_mem_new_ptr(cpu_env
,
5396 offsetof(CPUSPARCState
, regwptr
),
5399 for (i
= 0; i
< ARRAY_SIZE(r32
); ++i
) {
5400 *r32
[i
].ptr
= tcg_global_mem_new_i32(cpu_env
, r32
[i
].off
, r32
[i
].name
);
5403 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5404 *rtl
[i
].ptr
= tcg_global_mem_new(cpu_env
, rtl
[i
].off
, rtl
[i
].name
);
5407 TCGV_UNUSED(cpu_regs
[0]);
5408 for (i
= 1; i
< 8; ++i
) {
5409 cpu_regs
[i
] = tcg_global_mem_new(cpu_env
,
5410 offsetof(CPUSPARCState
, gregs
[i
]),
5414 for (i
= 8; i
< 32; ++i
) {
5415 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5416 (i
- 8) * sizeof(target_ulong
),
5420 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5421 cpu_fpr
[i
] = tcg_global_mem_new_i64(cpu_env
,
5422 offsetof(CPUSPARCState
, fpr
[i
]),
5427 void restore_state_to_opc(CPUSPARCState
*env
, TranslationBlock
*tb
,
5430 target_ulong pc
= data
[0];
5431 target_ulong npc
= data
[1];
5434 if (npc
== DYNAMIC_PC
) {
5435 /* dynamic NPC: already stored */
5436 } else if (npc
& JUMP_PC
) {
5437 /* jump PC: use 'cond' and the jump targets of the translation */
5439 env
->npc
= npc
& ~3;