/*
   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
#ifndef CONFIG_USER_ONLY
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
#endif
} DisasContext;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
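/*
 * Worked example (illustrative only, not part of the original code):
 * GET_FIELD numbers bits in big-endian order (bit 0 is the MSB of the
 * 32-bit word), while GET_FIELD_SP follows the manuals (bit 0 is 2^0).
 * For the word 0x12345678:
 *
 *     GET_FIELD(0x12345678, 0, 7)    == 0x12   (the top byte)
 *     GET_FIELD_SP(0x12345678, 0, 7) == 0x78   (the low byte)
 */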
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
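/*
 * Worked example (illustrative only, not part of the original code): on
 * TARGET_SPARC64 the 5-bit register field encodes double-precision
 * registers %d0..%d62 with bit 5 of the register number folded into bit 0
 * of the field, so DFPREG(1) == 32 selects %d32 while DFPREG(2) == 2
 * selects %d2.
 */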
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
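/*
 * Worked example (illustrative only, not part of the original code, and
 * assuming the "len = 32 - len" reconstruction above): sign_extend()
 * treats bit (len - 1) as the sign bit of a len-bit field, so
 * sign_extend(0x1fff, 13) == -1 while sign_extend(0x0fff, 13) == 0x0fff.
 * GET_FIELDs/GET_FIELD_SPs combine this with the field extractors to
 * fetch signed immediates such as the 13-bit simm13 field.
 */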
#define IS_IMM (insn & (1<<13))
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}
static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
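/*
 * Worked example (illustrative only, not part of the original code): both
 * helpers produce a 64-bit product and latch its high half in %y, e.g.
 * SMUL with operands -2 and 3 yields -6, so %y receives 0xffffffff and
 * the low 32 bits of the result are 0xfffffffa.
 */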
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
735 FPSR bit field FCC1 | FCC0:
741 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
742 unsigned int fcc_offset
)
744 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
745 tcg_gen_andi_tl(reg
, reg
, 0x1);
748 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
750 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
751 tcg_gen_andi_tl(reg
, reg
, 0x1);
755 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
757 TCGv t0
= tcg_temp_new();
758 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
759 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
760 tcg_gen_or_tl(dst
, dst
, t0
);
763 // 1 or 2: FCC0 ^ FCC1
764 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
766 TCGv t0
= tcg_temp_new();
767 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
768 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
769 tcg_gen_xor_tl(dst
, dst
, t0
);
773 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
775 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
779 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
781 TCGv t0
= tcg_temp_new();
782 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
783 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
784 tcg_gen_andc_tl(dst
, dst
, t0
);
788 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
790 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
794 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
796 TCGv t0
= tcg_temp_new();
797 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
798 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
799 tcg_gen_andc_tl(dst
, t0
, dst
);
803 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
805 TCGv t0
= tcg_temp_new();
806 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
807 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
808 tcg_gen_and_tl(dst
, dst
, t0
);
812 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
814 TCGv t0
= tcg_temp_new();
815 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
816 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
817 tcg_gen_or_tl(dst
, dst
, t0
);
818 tcg_gen_xori_tl(dst
, dst
, 0x1);
821 // 0 or 3: !(FCC0 ^ FCC1)
822 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
824 TCGv t0
= tcg_temp_new();
825 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
826 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
827 tcg_gen_xor_tl(dst
, dst
, t0
);
828 tcg_gen_xori_tl(dst
, dst
, 0x1);
832 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
834 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
835 tcg_gen_xori_tl(dst
, dst
, 0x1);
838 // !1: !(FCC0 & !FCC1)
839 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
841 TCGv t0
= tcg_temp_new();
842 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
843 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
844 tcg_gen_andc_tl(dst
, dst
, t0
);
845 tcg_gen_xori_tl(dst
, dst
, 0x1);
849 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
851 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
852 tcg_gen_xori_tl(dst
, dst
, 0x1);
855 // !2: !(!FCC0 & FCC1)
856 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
858 TCGv t0
= tcg_temp_new();
859 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
860 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
861 tcg_gen_andc_tl(dst
, t0
, dst
);
862 tcg_gen_xori_tl(dst
, dst
, 0x1);
865 // !3: !(FCC0 & FCC1)
866 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
868 TCGv t0
= tcg_temp_new();
869 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
870 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
871 tcg_gen_and_tl(dst
, dst
, t0
);
872 tcg_gen_xori_tl(dst
, dst
, 0x1);
875 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
876 target_ulong pc2
, TCGv r_cond
)
878 TCGLabel
*l1
= gen_new_label();
880 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
882 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
885 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
888 static void gen_branch_a(DisasContext
*dc
, target_ulong pc1
)
890 TCGLabel
*l1
= gen_new_label();
891 target_ulong npc
= dc
->npc
;
893 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cond
, 0, l1
);
895 gen_goto_tb(dc
, 0, npc
, pc1
);
898 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
900 dc
->base
.is_jmp
= DISAS_NORETURN
;
903 static void gen_branch_n(DisasContext
*dc
, target_ulong pc1
)
905 target_ulong npc
= dc
->npc
;
910 case DYNAMIC_PC_LOOKUP
:
911 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
912 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
913 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
,
914 cpu_cond
, tcg_constant_tl(0),
915 tcg_constant_tl(pc1
), cpu_npc
);
919 g_assert_not_reached();
923 dc
->jump_pc
[0] = pc1
;
924 dc
->jump_pc
[1] = npc
+ 4;
929 static void gen_generic_branch(DisasContext
*dc
)
931 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
932 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
933 TCGv zero
= tcg_constant_tl(0);
935 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
938 /* call this function before using the condition register as it may
939 have been set for a jump */
940 static void flush_cond(DisasContext
*dc
)
942 if (dc
->npc
== JUMP_PC
) {
943 gen_generic_branch(dc
);
944 dc
->npc
= DYNAMIC_PC_LOOKUP
;
948 static void save_npc(DisasContext
*dc
)
953 gen_generic_branch(dc
);
954 dc
->npc
= DYNAMIC_PC_LOOKUP
;
957 case DYNAMIC_PC_LOOKUP
:
960 g_assert_not_reached();
963 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
967 static void update_psr(DisasContext
*dc
)
969 if (dc
->cc_op
!= CC_OP_FLAGS
) {
970 dc
->cc_op
= CC_OP_FLAGS
;
971 gen_helper_compute_psr(cpu_env
);
975 static void save_state(DisasContext
*dc
)
977 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
981 static void gen_exception(DisasContext
*dc
, int which
)
984 gen_helper_raise_exception(cpu_env
, tcg_constant_i32(which
));
985 dc
->base
.is_jmp
= DISAS_NORETURN
;
988 static void gen_check_align(TCGv addr
, int mask
)
990 gen_helper_check_align(cpu_env
, addr
, tcg_constant_i32(mask
));
993 static void gen_mov_pc_npc(DisasContext
*dc
)
998 gen_generic_branch(dc
);
999 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1000 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1003 case DYNAMIC_PC_LOOKUP
:
1004 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1008 g_assert_not_reached();
1015 static void gen_op_next_insn(void)
1017 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1018 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1021 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1024 static int subcc_cond
[16] = {
1040 -1, /* no overflow */
1043 static int logic_cond
[16] = {
1045 TCG_COND_EQ
, /* eq: Z */
1046 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1047 TCG_COND_LT
, /* lt: N ^ V -> N */
1048 TCG_COND_EQ
, /* leu: C | Z -> Z */
1049 TCG_COND_NEVER
, /* ltu: C -> 0 */
1050 TCG_COND_LT
, /* neg: N */
1051 TCG_COND_NEVER
, /* vs: V -> 0 */
1053 TCG_COND_NE
, /* ne: !Z */
1054 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1055 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1056 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1057 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1058 TCG_COND_GE
, /* pos: !N */
1059 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1065 #ifdef TARGET_SPARC64
1075 switch (dc
->cc_op
) {
1077 cmp
->cond
= logic_cond
[cond
];
1079 cmp
->is_bool
= false;
1080 cmp
->c2
= tcg_constant_tl(0);
1081 #ifdef TARGET_SPARC64
1083 cmp
->c1
= tcg_temp_new();
1084 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1088 cmp
->c1
= cpu_cc_dst
;
1095 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1096 goto do_compare_dst_0
;
1098 case 7: /* overflow */
1099 case 15: /* !overflow */
1103 cmp
->cond
= subcc_cond
[cond
];
1104 cmp
->is_bool
= false;
1105 #ifdef TARGET_SPARC64
1107 /* Note that sign-extension works for unsigned compares as
1108 long as both operands are sign-extended. */
1109 cmp
->c1
= tcg_temp_new();
1110 cmp
->c2
= tcg_temp_new();
1111 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1112 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1116 cmp
->c1
= cpu_cc_src
;
1117 cmp
->c2
= cpu_cc_src2
;
1124 gen_helper_compute_psr(cpu_env
);
1125 dc
->cc_op
= CC_OP_FLAGS
;
1129 /* We're going to generate a boolean result. */
1130 cmp
->cond
= TCG_COND_NE
;
1131 cmp
->is_bool
= true;
1132 cmp
->c1
= r_dst
= tcg_temp_new();
1133 cmp
->c2
= tcg_constant_tl(0);
1137 gen_op_eval_bn(r_dst
);
1140 gen_op_eval_be(r_dst
, r_src
);
1143 gen_op_eval_ble(r_dst
, r_src
);
1146 gen_op_eval_bl(r_dst
, r_src
);
1149 gen_op_eval_bleu(r_dst
, r_src
);
1152 gen_op_eval_bcs(r_dst
, r_src
);
1155 gen_op_eval_bneg(r_dst
, r_src
);
1158 gen_op_eval_bvs(r_dst
, r_src
);
1161 gen_op_eval_ba(r_dst
);
1164 gen_op_eval_bne(r_dst
, r_src
);
1167 gen_op_eval_bg(r_dst
, r_src
);
1170 gen_op_eval_bge(r_dst
, r_src
);
1173 gen_op_eval_bgu(r_dst
, r_src
);
1176 gen_op_eval_bcc(r_dst
, r_src
);
1179 gen_op_eval_bpos(r_dst
, r_src
);
1182 gen_op_eval_bvc(r_dst
, r_src
);
1189 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1191 unsigned int offset
;
1194 /* For now we still generate a straight boolean result. */
1195 cmp
->cond
= TCG_COND_NE
;
1196 cmp
->is_bool
= true;
1197 cmp
->c1
= r_dst
= tcg_temp_new();
1198 cmp
->c2
= tcg_constant_tl(0);
1218 gen_op_eval_bn(r_dst
);
1221 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1224 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1227 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1230 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1233 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1236 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1239 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1242 gen_op_eval_ba(r_dst
);
1245 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1248 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1251 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1254 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1257 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1260 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1263 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1268 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1272 gen_compare(&cmp
, cc
, cond
, dc
);
1274 /* The interface is to return a boolean in r_dst. */
1276 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1278 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1282 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1285 gen_fcompare(&cmp
, cc
, cond
);
1287 /* The interface is to return a boolean in r_dst. */
1289 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1291 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1295 #ifdef TARGET_SPARC64
1297 static const int gen_tcg_cond_reg
[8] = {
1308 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1310 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1311 cmp
->is_bool
= false;
1313 cmp
->c2
= tcg_constant_tl(0);
1316 static void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1319 gen_compare_reg(&cmp
, cond
, r_src
);
1321 /* The interface is to return a boolean in r_dst. */
1322 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1326 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1328 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1329 target_ulong target
= dc
->pc
+ offset
;
1331 #ifdef TARGET_SPARC64
1332 if (unlikely(AM_CHECK(dc
))) {
1333 target
&= 0xffffffffULL
;
1337 /* unconditional not taken */
1339 dc
->pc
= dc
->npc
+ 4;
1340 dc
->npc
= dc
->pc
+ 4;
1343 dc
->npc
= dc
->pc
+ 4;
1345 } else if (cond
== 0x8) {
1346 /* unconditional taken */
1349 dc
->npc
= dc
->pc
+ 4;
1353 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1357 gen_cond(cpu_cond
, cc
, cond
, dc
);
1359 gen_branch_a(dc
, target
);
1361 gen_branch_n(dc
, target
);
1366 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1368 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1369 target_ulong target
= dc
->pc
+ offset
;
1371 #ifdef TARGET_SPARC64
1372 if (unlikely(AM_CHECK(dc
))) {
1373 target
&= 0xffffffffULL
;
1377 /* unconditional not taken */
1379 dc
->pc
= dc
->npc
+ 4;
1380 dc
->npc
= dc
->pc
+ 4;
1383 dc
->npc
= dc
->pc
+ 4;
1385 } else if (cond
== 0x8) {
1386 /* unconditional taken */
1389 dc
->npc
= dc
->pc
+ 4;
1393 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1397 gen_fcond(cpu_cond
, cc
, cond
);
1399 gen_branch_a(dc
, target
);
1401 gen_branch_n(dc
, target
);
1406 #ifdef TARGET_SPARC64
1407 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1410 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1411 target_ulong target
= dc
->pc
+ offset
;
1413 if (unlikely(AM_CHECK(dc
))) {
1414 target
&= 0xffffffffULL
;
1417 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1419 gen_branch_a(dc
, target
);
1421 gen_branch_n(dc
, target
);
1425 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1429 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1432 gen_helper_fcmps_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1435 gen_helper_fcmps_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1438 gen_helper_fcmps_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1443 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1447 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1450 gen_helper_fcmpd_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1453 gen_helper_fcmpd_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1456 gen_helper_fcmpd_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1461 static void gen_op_fcmpq(int fccno
)
1465 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1468 gen_helper_fcmpq_fcc1(cpu_fsr
, cpu_env
);
1471 gen_helper_fcmpq_fcc2(cpu_fsr
, cpu_env
);
1474 gen_helper_fcmpq_fcc3(cpu_fsr
, cpu_env
);
1479 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1483 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1486 gen_helper_fcmpes_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1489 gen_helper_fcmpes_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1492 gen_helper_fcmpes_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1497 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1501 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1504 gen_helper_fcmped_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1507 gen_helper_fcmped_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1510 gen_helper_fcmped_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1515 static void gen_op_fcmpeq(int fccno
)
1519 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1522 gen_helper_fcmpeq_fcc1(cpu_fsr
, cpu_env
);
1525 gen_helper_fcmpeq_fcc2(cpu_fsr
, cpu_env
);
1528 gen_helper_fcmpeq_fcc3(cpu_fsr
, cpu_env
);
1535 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1537 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1540 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1542 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1545 static void gen_op_fcmpq(int fccno
)
1547 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1550 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1552 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1555 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1557 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1560 static void gen_op_fcmpeq(int fccno
)
1562 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1566 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1568 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1569 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1570 gen_exception(dc
, TT_FP_EXCP
);
1573 static int gen_trap_ifnofpu(DisasContext
*dc
)
1575 #if !defined(CONFIG_USER_ONLY)
1576 if (!dc
->fpu_enabled
) {
1577 gen_exception(dc
, TT_NFPU_INSN
);
1584 static void gen_op_clear_ieee_excp_and_FTT(void)
1586 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1589 static void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1590 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1594 src
= gen_load_fpr_F(dc
, rs
);
1595 dst
= gen_dest_fpr_F(dc
);
1597 gen(dst
, cpu_env
, src
);
1598 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1600 gen_store_fpr_F(dc
, rd
, dst
);
1603 static void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1604 void (*gen
)(TCGv_i32
, TCGv_i32
))
1608 src
= gen_load_fpr_F(dc
, rs
);
1609 dst
= gen_dest_fpr_F(dc
);
1613 gen_store_fpr_F(dc
, rd
, dst
);
1616 static void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1617 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1619 TCGv_i32 dst
, src1
, src2
;
1621 src1
= gen_load_fpr_F(dc
, rs1
);
1622 src2
= gen_load_fpr_F(dc
, rs2
);
1623 dst
= gen_dest_fpr_F(dc
);
1625 gen(dst
, cpu_env
, src1
, src2
);
1626 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1628 gen_store_fpr_F(dc
, rd
, dst
);
1631 #ifdef TARGET_SPARC64
1632 static void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1633 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1635 TCGv_i32 dst
, src1
, src2
;
1637 src1
= gen_load_fpr_F(dc
, rs1
);
1638 src2
= gen_load_fpr_F(dc
, rs2
);
1639 dst
= gen_dest_fpr_F(dc
);
1641 gen(dst
, src1
, src2
);
1643 gen_store_fpr_F(dc
, rd
, dst
);
1647 static void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1648 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1652 src
= gen_load_fpr_D(dc
, rs
);
1653 dst
= gen_dest_fpr_D(dc
, rd
);
1655 gen(dst
, cpu_env
, src
);
1656 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1658 gen_store_fpr_D(dc
, rd
, dst
);
1661 #ifdef TARGET_SPARC64
1662 static void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1663 void (*gen
)(TCGv_i64
, TCGv_i64
))
1667 src
= gen_load_fpr_D(dc
, rs
);
1668 dst
= gen_dest_fpr_D(dc
, rd
);
1672 gen_store_fpr_D(dc
, rd
, dst
);
1676 static void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1677 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1679 TCGv_i64 dst
, src1
, src2
;
1681 src1
= gen_load_fpr_D(dc
, rs1
);
1682 src2
= gen_load_fpr_D(dc
, rs2
);
1683 dst
= gen_dest_fpr_D(dc
, rd
);
1685 gen(dst
, cpu_env
, src1
, src2
);
1686 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1688 gen_store_fpr_D(dc
, rd
, dst
);
1691 #ifdef TARGET_SPARC64
1692 static void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1693 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1695 TCGv_i64 dst
, src1
, src2
;
1697 src1
= gen_load_fpr_D(dc
, rs1
);
1698 src2
= gen_load_fpr_D(dc
, rs2
);
1699 dst
= gen_dest_fpr_D(dc
, rd
);
1701 gen(dst
, src1
, src2
);
1703 gen_store_fpr_D(dc
, rd
, dst
);
1706 static void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1707 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1709 TCGv_i64 dst
, src1
, src2
;
1711 src1
= gen_load_fpr_D(dc
, rs1
);
1712 src2
= gen_load_fpr_D(dc
, rs2
);
1713 dst
= gen_dest_fpr_D(dc
, rd
);
1715 gen(dst
, cpu_gsr
, src1
, src2
);
1717 gen_store_fpr_D(dc
, rd
, dst
);
1720 static void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1721 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1723 TCGv_i64 dst
, src0
, src1
, src2
;
1725 src1
= gen_load_fpr_D(dc
, rs1
);
1726 src2
= gen_load_fpr_D(dc
, rs2
);
1727 src0
= gen_load_fpr_D(dc
, rd
);
1728 dst
= gen_dest_fpr_D(dc
, rd
);
1730 gen(dst
, src0
, src1
, src2
);
1732 gen_store_fpr_D(dc
, rd
, dst
);
1736 static void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1737 void (*gen
)(TCGv_ptr
))
1739 gen_op_load_fpr_QT1(QFPREG(rs
));
1742 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1744 gen_op_store_QT0_fpr(QFPREG(rd
));
1745 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1748 #ifdef TARGET_SPARC64
1749 static void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1750 void (*gen
)(TCGv_ptr
))
1752 gen_op_load_fpr_QT1(QFPREG(rs
));
1756 gen_op_store_QT0_fpr(QFPREG(rd
));
1757 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1761 static void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1762 void (*gen
)(TCGv_ptr
))
1764 gen_op_load_fpr_QT0(QFPREG(rs1
));
1765 gen_op_load_fpr_QT1(QFPREG(rs2
));
1768 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1770 gen_op_store_QT0_fpr(QFPREG(rd
));
1771 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1774 static void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1775 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1778 TCGv_i32 src1
, src2
;
1780 src1
= gen_load_fpr_F(dc
, rs1
);
1781 src2
= gen_load_fpr_F(dc
, rs2
);
1782 dst
= gen_dest_fpr_D(dc
, rd
);
1784 gen(dst
, cpu_env
, src1
, src2
);
1785 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1787 gen_store_fpr_D(dc
, rd
, dst
);
1790 static void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1791 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1793 TCGv_i64 src1
, src2
;
1795 src1
= gen_load_fpr_D(dc
, rs1
);
1796 src2
= gen_load_fpr_D(dc
, rs2
);
1798 gen(cpu_env
, src1
, src2
);
1799 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1801 gen_op_store_QT0_fpr(QFPREG(rd
));
1802 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1805 #ifdef TARGET_SPARC64
1806 static void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1807 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1812 src
= gen_load_fpr_F(dc
, rs
);
1813 dst
= gen_dest_fpr_D(dc
, rd
);
1815 gen(dst
, cpu_env
, src
);
1816 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1818 gen_store_fpr_D(dc
, rd
, dst
);
1822 static void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1823 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1828 src
= gen_load_fpr_F(dc
, rs
);
1829 dst
= gen_dest_fpr_D(dc
, rd
);
1831 gen(dst
, cpu_env
, src
);
1833 gen_store_fpr_D(dc
, rd
, dst
);
1836 static void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1837 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1842 src
= gen_load_fpr_D(dc
, rs
);
1843 dst
= gen_dest_fpr_F(dc
);
1845 gen(dst
, cpu_env
, src
);
1846 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1848 gen_store_fpr_F(dc
, rd
, dst
);
1851 static void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1852 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1856 gen_op_load_fpr_QT1(QFPREG(rs
));
1857 dst
= gen_dest_fpr_F(dc
);
1860 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1862 gen_store_fpr_F(dc
, rd
, dst
);
1865 static void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1866 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1870 gen_op_load_fpr_QT1(QFPREG(rs
));
1871 dst
= gen_dest_fpr_D(dc
, rd
);
1874 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1876 gen_store_fpr_D(dc
, rd
, dst
);
1879 static void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1880 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1884 src
= gen_load_fpr_F(dc
, rs
);
1888 gen_op_store_QT0_fpr(QFPREG(rd
));
1889 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1892 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1893 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1897 src
= gen_load_fpr_D(dc
, rs
);
1901 gen_op_store_QT0_fpr(QFPREG(rd
));
1902 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1905 static void gen_swap(DisasContext
*dc
, TCGv dst
, TCGv src
,
1906 TCGv addr
, int mmu_idx
, MemOp memop
)
1908 gen_address_mask(dc
, addr
);
1909 tcg_gen_atomic_xchg_tl(dst
, addr
, src
, mmu_idx
, memop
| MO_ALIGN
);
1912 static void gen_ldstub(DisasContext
*dc
, TCGv dst
, TCGv addr
, int mmu_idx
)
1914 TCGv m1
= tcg_constant_tl(0xff);
1915 gen_address_mask(dc
, addr
);
1916 tcg_gen_atomic_xchg_tl(dst
, addr
, m1
, mmu_idx
, MO_UB
);
1920 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1939 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
1941 int asi
= GET_FIELD(insn
, 19, 26);
1942 ASIType type
= GET_ASI_HELPER
;
1943 int mem_idx
= dc
->mem_idx
;
1945 #ifndef TARGET_SPARC64
1946 /* Before v9, all asis are immediate and privileged. */
1948 gen_exception(dc
, TT_ILL_INSN
);
1949 type
= GET_ASI_EXCP
;
1950 } else if (supervisor(dc
)
1951 /* Note that LEON accepts ASI_USERDATA in user mode, for
1952 use with CASA. Also note that previous versions of
1953 QEMU allowed (and old versions of gcc emitted) ASI_P
1954 for LEON, which is incorrect. */
1955 || (asi
== ASI_USERDATA
1956 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1958 case ASI_USERDATA
: /* User data access */
1959 mem_idx
= MMU_USER_IDX
;
1960 type
= GET_ASI_DIRECT
;
1962 case ASI_KERNELDATA
: /* Supervisor data access */
1963 mem_idx
= MMU_KERNEL_IDX
;
1964 type
= GET_ASI_DIRECT
;
1966 case ASI_M_BYPASS
: /* MMU passthrough */
1967 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1968 mem_idx
= MMU_PHYS_IDX
;
1969 type
= GET_ASI_DIRECT
;
1971 case ASI_M_BCOPY
: /* Block copy, sta access */
1972 mem_idx
= MMU_KERNEL_IDX
;
1973 type
= GET_ASI_BCOPY
;
1975 case ASI_M_BFILL
: /* Block fill, stda access */
1976 mem_idx
= MMU_KERNEL_IDX
;
1977 type
= GET_ASI_BFILL
;
1981 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1982 * permissions check in get_physical_address(..).
1984 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1986 gen_exception(dc
, TT_PRIV_INSN
);
1987 type
= GET_ASI_EXCP
;
1993 /* With v9, all asis below 0x80 are privileged. */
1994 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1995 down that bit into DisasContext. For the moment that's ok,
1996 since the direct implementations below doesn't have any ASIs
1997 in the restricted [0x30, 0x7f] range, and the check will be
1998 done properly in the helper. */
1999 if (!supervisor(dc
) && asi
< 0x80) {
2000 gen_exception(dc
, TT_PRIV_ACT
);
2001 type
= GET_ASI_EXCP
;
2004 case ASI_REAL
: /* Bypass */
2005 case ASI_REAL_IO
: /* Bypass, non-cacheable */
2006 case ASI_REAL_L
: /* Bypass LE */
2007 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
2008 case ASI_TWINX_REAL
: /* Real address, twinx */
2009 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
2010 case ASI_QUAD_LDD_PHYS
:
2011 case ASI_QUAD_LDD_PHYS_L
:
2012 mem_idx
= MMU_PHYS_IDX
;
2014 case ASI_N
: /* Nucleus */
2015 case ASI_NL
: /* Nucleus LE */
2018 case ASI_NUCLEUS_QUAD_LDD
:
2019 case ASI_NUCLEUS_QUAD_LDD_L
:
2020 if (hypervisor(dc
)) {
2021 mem_idx
= MMU_PHYS_IDX
;
2023 mem_idx
= MMU_NUCLEUS_IDX
;
2026 case ASI_AIUP
: /* As if user primary */
2027 case ASI_AIUPL
: /* As if user primary LE */
2028 case ASI_TWINX_AIUP
:
2029 case ASI_TWINX_AIUP_L
:
2030 case ASI_BLK_AIUP_4V
:
2031 case ASI_BLK_AIUP_L_4V
:
2034 mem_idx
= MMU_USER_IDX
;
2036 case ASI_AIUS
: /* As if user secondary */
2037 case ASI_AIUSL
: /* As if user secondary LE */
2038 case ASI_TWINX_AIUS
:
2039 case ASI_TWINX_AIUS_L
:
2040 case ASI_BLK_AIUS_4V
:
2041 case ASI_BLK_AIUS_L_4V
:
2044 mem_idx
= MMU_USER_SECONDARY_IDX
;
2046 case ASI_S
: /* Secondary */
2047 case ASI_SL
: /* Secondary LE */
2050 case ASI_BLK_COMMIT_S
:
2057 if (mem_idx
== MMU_USER_IDX
) {
2058 mem_idx
= MMU_USER_SECONDARY_IDX
;
2059 } else if (mem_idx
== MMU_KERNEL_IDX
) {
2060 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
2063 case ASI_P
: /* Primary */
2064 case ASI_PL
: /* Primary LE */
2067 case ASI_BLK_COMMIT_P
:
2091 type
= GET_ASI_DIRECT
;
2093 case ASI_TWINX_REAL
:
2094 case ASI_TWINX_REAL_L
:
2097 case ASI_TWINX_AIUP
:
2098 case ASI_TWINX_AIUP_L
:
2099 case ASI_TWINX_AIUS
:
2100 case ASI_TWINX_AIUS_L
:
2105 case ASI_QUAD_LDD_PHYS
:
2106 case ASI_QUAD_LDD_PHYS_L
:
2107 case ASI_NUCLEUS_QUAD_LDD
:
2108 case ASI_NUCLEUS_QUAD_LDD_L
:
2109 type
= GET_ASI_DTWINX
;
2111 case ASI_BLK_COMMIT_P
:
2112 case ASI_BLK_COMMIT_S
:
2113 case ASI_BLK_AIUP_4V
:
2114 case ASI_BLK_AIUP_L_4V
:
2117 case ASI_BLK_AIUS_4V
:
2118 case ASI_BLK_AIUS_L_4V
:
2125 type
= GET_ASI_BLOCK
;
2132 type
= GET_ASI_SHORT
;
2139 type
= GET_ASI_SHORT
;
2142 /* The little-endian asis all have bit 3 set. */
2149 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
2152 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2153 int insn
, MemOp memop
)
2155 DisasASI da
= get_asi(dc
, insn
, memop
);
2160 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2161 gen_exception(dc
, TT_ILL_INSN
);
2163 case GET_ASI_DIRECT
:
2164 gen_address_mask(dc
, addr
);
2165 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2169 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2170 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2173 #ifdef TARGET_SPARC64
2174 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_mop
);
2177 TCGv_i64 t64
= tcg_temp_new_i64();
2178 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2179 tcg_gen_trunc_i64_tl(dst
, t64
);
2187 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2188 int insn
, MemOp memop
)
2190 DisasASI da
= get_asi(dc
, insn
, memop
);
2195 case GET_ASI_DTWINX
: /* Reserved for stda. */
2196 #ifndef TARGET_SPARC64
2197 gen_exception(dc
, TT_ILL_INSN
);
2200 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2201 /* Pre OpenSPARC CPUs don't have these */
2202 gen_exception(dc
, TT_ILL_INSN
);
2205 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2206 * are ST_BLKINIT_ ASIs */
2209 case GET_ASI_DIRECT
:
2210 gen_address_mask(dc
, addr
);
2211 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2213 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2215 /* Copy 32 bytes from the address in SRC to ADDR. */
2216 /* ??? The original qemu code suggests 4-byte alignment, dropping
2217 the low bits, but the only place I can see this used is in the
2218 Linux kernel with 32 byte alignment, which would make more sense
2219 as a cacheline-style operation. */
2221 TCGv saddr
= tcg_temp_new();
2222 TCGv daddr
= tcg_temp_new();
2223 TCGv four
= tcg_constant_tl(4);
2224 TCGv_i32 tmp
= tcg_temp_new_i32();
2227 tcg_gen_andi_tl(saddr
, src
, -4);
2228 tcg_gen_andi_tl(daddr
, addr
, -4);
2229 for (i
= 0; i
< 32; i
+= 4) {
2230 /* Since the loads and stores are paired, allow the
2231 copy to happen in the host endianness. */
2232 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2233 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2234 tcg_gen_add_tl(saddr
, saddr
, four
);
2235 tcg_gen_add_tl(daddr
, daddr
, four
);
2242 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2243 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2246 #ifdef TARGET_SPARC64
2247 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_mop
);
2250 TCGv_i64 t64
= tcg_temp_new_i64();
2251 tcg_gen_extu_tl_i64(t64
, src
);
2252 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2256 /* A write to a TLB register may alter page maps. End the TB. */
2257 dc
->npc
= DYNAMIC_PC
;
2263 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2264 TCGv addr
, int insn
)
2266 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2271 case GET_ASI_DIRECT
:
2272 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2275 /* ??? Should be DAE_invalid_asi. */
2276 gen_exception(dc
, TT_DATA_ACCESS
);
2281 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2284 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2290 case GET_ASI_DIRECT
:
2291 oldv
= tcg_temp_new();
2292 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2293 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2294 gen_store_gpr(dc
, rd
, oldv
);
2297 /* ??? Should be DAE_invalid_asi. */
2298 gen_exception(dc
, TT_DATA_ACCESS
);
2303 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2305 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2310 case GET_ASI_DIRECT
:
2311 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2314 /* ??? In theory, this should be raise DAE_invalid_asi.
2315 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2316 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2317 gen_helper_exit_atomic(cpu_env
);
2319 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2320 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2324 t64
= tcg_temp_new_i64();
2325 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2327 s64
= tcg_constant_i64(0xff);
2328 gen_helper_st_asi(cpu_env
, addr
, s64
, r_asi
, r_mop
);
2330 tcg_gen_trunc_i64_tl(dst
, t64
);
2333 dc
->npc
= DYNAMIC_PC
;
2340 #ifdef TARGET_SPARC64
2341 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2342 int insn
, int size
, int rd
)
2344 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2352 case GET_ASI_DIRECT
:
2353 gen_address_mask(dc
, addr
);
2356 d32
= gen_dest_fpr_F(dc
);
2357 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2358 gen_store_fpr_F(dc
, rd
, d32
);
2361 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2362 da
.memop
| MO_ALIGN_4
);
2365 d64
= tcg_temp_new_i64();
2366 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2367 tcg_gen_addi_tl(addr
, addr
, 8);
2368 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2369 da
.memop
| MO_ALIGN_4
);
2370 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2373 g_assert_not_reached();
2378 /* Valid for lddfa on aligned registers only. */
2379 if (size
== 8 && (rd
& 7) == 0) {
2384 gen_address_mask(dc
, addr
);
2386 /* The first operation checks required alignment. */
2387 memop
= da
.memop
| MO_ALIGN_64
;
2388 eight
= tcg_constant_tl(8);
2389 for (i
= 0; ; ++i
) {
2390 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2395 tcg_gen_add_tl(addr
, addr
, eight
);
2399 gen_exception(dc
, TT_ILL_INSN
);
2404 /* Valid for lddfa only. */
2406 gen_address_mask(dc
, addr
);
2407 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2408 da
.memop
| MO_ALIGN
);
2410 gen_exception(dc
, TT_ILL_INSN
);
2416 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2417 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
| MO_ALIGN
);
2420 /* According to the table in the UA2011 manual, the only
2421 other asis that are valid for ldfa/lddfa/ldqfa are
2422 the NO_FAULT asis. We still need a helper for these,
2423 but we can just use the integer asi helper for them. */
2426 d64
= tcg_temp_new_i64();
2427 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2428 d32
= gen_dest_fpr_F(dc
);
2429 tcg_gen_extrl_i64_i32(d32
, d64
);
2430 gen_store_fpr_F(dc
, rd
, d32
);
2433 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], cpu_env
, addr
, r_asi
, r_mop
);
2436 d64
= tcg_temp_new_i64();
2437 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2438 tcg_gen_addi_tl(addr
, addr
, 8);
2439 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], cpu_env
, addr
, r_asi
, r_mop
);
2440 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2443 g_assert_not_reached();
2450 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2451 int insn
, int size
, int rd
)
2453 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2460 case GET_ASI_DIRECT
:
2461 gen_address_mask(dc
, addr
);
2464 d32
= gen_load_fpr_F(dc
, rd
);
2465 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2468 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2469 da
.memop
| MO_ALIGN_4
);
2472 /* Only 4-byte alignment required. However, it is legal for the
2473 cpu to signal the alignment fault, and the OS trap handler is
2474 required to fix it up. Requiring 16-byte alignment here avoids
2475 having to probe the second page before performing the first
2477 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2478 da
.memop
| MO_ALIGN_16
);
2479 tcg_gen_addi_tl(addr
, addr
, 8);
2480 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2483 g_assert_not_reached();
2488 /* Valid for stdfa on aligned registers only. */
2489 if (size
== 8 && (rd
& 7) == 0) {
2494 gen_address_mask(dc
, addr
);
2496 /* The first operation checks required alignment. */
2497 memop
= da
.memop
| MO_ALIGN_64
;
2498 eight
= tcg_constant_tl(8);
2499 for (i
= 0; ; ++i
) {
2500 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2505 tcg_gen_add_tl(addr
, addr
, eight
);
2509 gen_exception(dc
, TT_ILL_INSN
);
2514 /* Valid for stdfa only. */
2516 gen_address_mask(dc
, addr
);
2517 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2518 da
.memop
| MO_ALIGN
);
2520 gen_exception(dc
, TT_ILL_INSN
);
2525 /* According to the table in the UA2011 manual, the only
2526 other asis that are valid for ldfa/lddfa/ldqfa are
2527 the PST* asis, which aren't currently handled. */
2528 gen_exception(dc
, TT_ILL_INSN
);
2533 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2535 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2536 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2537 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2543 case GET_ASI_DTWINX
:
2544 gen_address_mask(dc
, addr
);
2545 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2546 tcg_gen_addi_tl(addr
, addr
, 8);
2547 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2550 case GET_ASI_DIRECT
:
2552 TCGv_i64 tmp
= tcg_temp_new_i64();
2554 gen_address_mask(dc
, addr
);
2555 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2557 /* Note that LE ldda acts as if each 32-bit register
2558 result is byte swapped. Having just performed one
2559 64-bit bswap, we need now to swap the writebacks. */
2560 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2561 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2563 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2569 /* ??? In theory we've handled all of the ASIs that are valid
2570 for ldda, and this should raise DAE_invalid_asi. However,
2571 real hardware allows others. This can be seen with e.g.
2572 FreeBSD 10.3 wrt ASI_IC_TAG. */
2574 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2575 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2576 TCGv_i64 tmp
= tcg_temp_new_i64();
2579 gen_helper_ld_asi(tmp
, cpu_env
, addr
, r_asi
, r_mop
);
2582 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2583 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2585 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2591 gen_store_gpr(dc
, rd
, hi
);
2592 gen_store_gpr(dc
, rd
+ 1, lo
);
2595 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2598 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2599 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2605 case GET_ASI_DTWINX
:
2606 gen_address_mask(dc
, addr
);
2607 tcg_gen_qemu_st_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2608 tcg_gen_addi_tl(addr
, addr
, 8);
2609 tcg_gen_qemu_st_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2612 case GET_ASI_DIRECT
:
2614 TCGv_i64 t64
= tcg_temp_new_i64();
2616 /* Note that LE stda acts as if each 32-bit register result is
2617 byte swapped. We will perform one 64-bit LE store, so now
2618 we must swap the order of the construction. */
2619 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2620 tcg_gen_concat32_i64(t64
, lo
, hi
);
2622 tcg_gen_concat32_i64(t64
, hi
, lo
);
2624 gen_address_mask(dc
, addr
);
2625 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2630 /* ??? In theory we've handled all of the ASIs that are valid
2631 for stda, and this should raise DAE_invalid_asi. */
2633 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2634 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2635 TCGv_i64 t64
= tcg_temp_new_i64();
2638 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2639 tcg_gen_concat32_i64(t64
, lo
, hi
);
2641 tcg_gen_concat32_i64(t64
, hi
, lo
);
2645 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2651 static void gen_casx_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2654 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2660 case GET_ASI_DIRECT
:
2661 oldv
= tcg_temp_new();
2662 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2663 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2664 gen_store_gpr(dc
, rd
, oldv
);
2667 /* ??? Should be DAE_invalid_asi. */
2668 gen_exception(dc
, TT_DATA_ACCESS
);
2673 #elif !defined(CONFIG_USER_ONLY)
2674 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2676 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2677 whereby "rd + 1" elicits "error: array subscript is above array".
2678 Since we have already asserted that rd is even, the semantics
2680 TCGv lo
= gen_dest_gpr(dc
, rd
| 1);
2681 TCGv hi
= gen_dest_gpr(dc
, rd
);
2682 TCGv_i64 t64
= tcg_temp_new_i64();
2683 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2688 case GET_ASI_DIRECT
:
2689 gen_address_mask(dc
, addr
);
2690 tcg_gen_qemu_ld_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2694 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2695 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2698 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2703 tcg_gen_extr_i64_i32(lo
, hi
, t64
);
2704 gen_store_gpr(dc
, rd
| 1, lo
);
2705 gen_store_gpr(dc
, rd
, hi
);
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
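
/*
 * Net effect of the block-fill loop above: the 64-bit value built from the
 * even/odd register pair is stored four times, at (addr & ~7), +8, +16 and
 * +24, i.e. the same doubleword is replicated across a 32-byte block
 * starting at the rounded-down address.
 */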
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
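
/*
 * Example of the format-3 operand decode above: for "or %g1, 0x7ff, %g2"
 * bit 13 (IS_IMM) is set, so get_src2() returns a temporary holding the
 * sign-extended 13-bit immediate 0x7ff, while for "or %g1, %g3, %g2" it
 * returns the value of rs2 = %g3 taken from bits 4:0; get_src1() always
 * reads rs1 from bits 18:14.
 */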
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
}
#ifndef CONFIG_USER_ONLY
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
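
/*
 * i.e. the pointer arithmetic above is the TCG spelling of
 *     r_tsptr = (char *)env + offsetof(CPUSPARCState, ts)
 *               + (env->tl & MAXTL_MASK) * sizeof(trap_state);
 * the multiply turns the masked trap level into a byte offset before it is
 * added to the base of the ts[] array.
 */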
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 32:
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)  */
    tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);
    tcg_gen_neg_tl(lo1, lo1);
    tcg_gen_or_tl(lo2, lo2, lo1);
    tcg_gen_and_tl(dst, dst, lo2);
}
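
/*
 * Worked example of the table lookup above (8-bit, left-edge table): when
 * the low three bits of s1 are 3, the lookup reads bits 31:24 of TABL,
 * i.e. (0x80c0e0f0f8fcfeffULL >> 24) & 0xff == 0xf8, which is the edge
 * mask for an operand pointing three bytes into its doubleword.
 */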
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
}
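
/*
 * Example: for alignaddr with s1 + s2 == 0x1003 the destination register
 * receives 0x1000 and GSR.align (bits 2:0 of %gsr) is set to 3; the
 * little-endian variant (alignaddrl) deposits the low bits of the negated
 * sum instead.
 */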
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
}
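
/*
 * The xori/shri pair implements "t2 = s2 >> (64 - shift)" without ever
 * using a shift count of 64: for shift in [0, 56], shift ^ 63 == 63 - shift,
 * so shifting right by (63 - shift) and then by one more bit equals a shift
 * by (64 - shift), and a GSR.align of 0 correctly contributes nothing.
 */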
#endif

#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
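    /*
     * opc is the 2-bit primary "op" field of the instruction word: 0 selects
     * branches and SETHI, 1 is CALL (handled below by sign-extending the
     * 30-bit word displacement), 2 covers arithmetic, logical and FPU
     * operations, and 3 covers loads and stores.
     */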
2997 case 0: /* branches/sethi */
2999 unsigned int xop
= GET_FIELD(insn
, 7, 9);
3002 #ifdef TARGET_SPARC64
3003 case 0x1: /* V9 BPcc */
3007 target
= GET_FIELD_SP(insn
, 0, 18);
3008 target
= sign_extend(target
, 19);
3010 cc
= GET_FIELD_SP(insn
, 20, 21);
3012 do_branch(dc
, target
, insn
, 0);
3014 do_branch(dc
, target
, insn
, 1);
3019 case 0x3: /* V9 BPr */
3021 target
= GET_FIELD_SP(insn
, 0, 13) |
3022 (GET_FIELD_SP(insn
, 20, 21) << 14);
3023 target
= sign_extend(target
, 16);
3025 cpu_src1
= get_src1(dc
, insn
);
3026 do_branch_reg(dc
, target
, insn
, cpu_src1
);
3029 case 0x5: /* V9 FBPcc */
3031 int cc
= GET_FIELD_SP(insn
, 20, 21);
3032 if (gen_trap_ifnofpu(dc
)) {
3035 target
= GET_FIELD_SP(insn
, 0, 18);
3036 target
= sign_extend(target
, 19);
3038 do_fbranch(dc
, target
, insn
, cc
);
3042 case 0x7: /* CBN+x */
3047 case 0x2: /* BN+x */
3049 target
= GET_FIELD(insn
, 10, 31);
3050 target
= sign_extend(target
, 22);
3052 do_branch(dc
, target
, insn
, 0);
3055 case 0x6: /* FBN+x */
3057 if (gen_trap_ifnofpu(dc
)) {
3060 target
= GET_FIELD(insn
, 10, 31);
3061 target
= sign_extend(target
, 22);
3063 do_fbranch(dc
, target
, insn
, 0);
        case 0x4:       /* SETHI */
            /* Special-case %g0 because that's the canonical nop.  */
            if (rd) {
                uint32_t value = GET_FIELD(insn, 10, 31);
                TCGv t = gen_dest_gpr(dc, rd);
                tcg_gen_movi_tl(t, value << 10);
                gen_store_gpr(dc, rd, t);
            }
            break;
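        /*
         * e.g. "sethi %hi(0x12345678), %o0" encodes imm22 = 0x48d15 and
         * leaves %o0 = 0x48d15 << 10 = 0x12345400; the low 10 bits are
         * expected to be filled in by a following "or %o0, 0x278, %o0".
         */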
3075 case 0x0: /* UNIMPL */
3084 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
3085 TCGv o7
= gen_dest_gpr(dc
, 15);
3087 tcg_gen_movi_tl(o7
, dc
->pc
);
3088 gen_store_gpr(dc
, 15, o7
);
3091 #ifdef TARGET_SPARC64
3092 if (unlikely(AM_CHECK(dc
))) {
3093 target
&= 0xffffffffULL
;
3099 case 2: /* FPU & Logical Operations */
3101 unsigned int xop
= GET_FIELD(insn
, 7, 12);
3102 TCGv cpu_dst
= tcg_temp_new();
3105 if (xop
== 0x3a) { /* generate trap */
3106 int cond
= GET_FIELD(insn
, 3, 6);
3108 TCGLabel
*l1
= NULL
;
3119 /* Conditional trap. */
3121 #ifdef TARGET_SPARC64
3123 int cc
= GET_FIELD_SP(insn
, 11, 12);
3125 gen_compare(&cmp
, 0, cond
, dc
);
3126 } else if (cc
== 2) {
3127 gen_compare(&cmp
, 1, cond
, dc
);
3132 gen_compare(&cmp
, 0, cond
, dc
);
3134 l1
= gen_new_label();
3135 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
3136 cmp
.c1
, cmp
.c2
, l1
);
3139 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
3140 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
3142 /* Don't use the normal temporaries, as they may well have
3143 gone out of scope with the branch above. While we're
3144 doing that we might as well pre-truncate to 32-bit. */
3145 trap
= tcg_temp_new_i32();
3147 rs1
= GET_FIELD_SP(insn
, 14, 18);
3149 rs2
= GET_FIELD_SP(insn
, 0, 7);
3151 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
3152 /* Signal that the trap value is fully constant. */
3155 TCGv t1
= gen_load_gpr(dc
, rs1
);
3156 tcg_gen_trunc_tl_i32(trap
, t1
);
3157 tcg_gen_addi_i32(trap
, trap
, rs2
);
3161 rs2
= GET_FIELD_SP(insn
, 0, 4);
3162 t1
= gen_load_gpr(dc
, rs1
);
3163 t2
= gen_load_gpr(dc
, rs2
);
3164 tcg_gen_add_tl(t1
, t1
, t2
);
3165 tcg_gen_trunc_tl_i32(trap
, t1
);
3168 tcg_gen_andi_i32(trap
, trap
, mask
);
3169 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
3172 gen_helper_raise_exception(cpu_env
, trap
);
3175 /* An unconditional trap ends the TB. */
3176 dc
->base
.is_jmp
= DISAS_NORETURN
;
3179 /* A conditional trap falls through to the next insn. */
3183 } else if (xop
== 0x28) {
3184 rs1
= GET_FIELD(insn
, 13, 17);
3187 #ifndef TARGET_SPARC64
3188 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3189 manual, rdy on the microSPARC
3191 case 0x0f: /* stbar in the SPARCv8 manual,
3192 rdy on the microSPARC II */
3193 case 0x10 ... 0x1f: /* implementation-dependent in the
3194 SPARCv8 manual, rdy on the
3197 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
3198 TCGv t
= gen_dest_gpr(dc
, rd
);
3199 /* Read Asr17 for a Leon3 monoprocessor */
3200 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
3201 gen_store_gpr(dc
, rd
, t
);
3205 gen_store_gpr(dc
, rd
, cpu_y
);
3207 #ifdef TARGET_SPARC64
3208 case 0x2: /* V9 rdccr */
3210 gen_helper_rdccr(cpu_dst
, cpu_env
);
3211 gen_store_gpr(dc
, rd
, cpu_dst
);
3213 case 0x3: /* V9 rdasi */
3214 tcg_gen_movi_tl(cpu_dst
, dc
->asi
);
3215 gen_store_gpr(dc
, rd
, cpu_dst
);
3217 case 0x4: /* V9 rdtick */
3222 r_tickptr
= tcg_temp_new_ptr();
3223 r_const
= tcg_constant_i32(dc
->mem_idx
);
3224 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3225 offsetof(CPUSPARCState
, tick
));
3226 if (translator_io_start(&dc
->base
)) {
3227 dc
->base
.is_jmp
= DISAS_EXIT
;
3229 gen_helper_tick_get_count(cpu_dst
, cpu_env
, r_tickptr
,
3231 gen_store_gpr(dc
, rd
, cpu_dst
);
3234 case 0x5: /* V9 rdpc */
3236 TCGv t
= gen_dest_gpr(dc
, rd
);
3237 if (unlikely(AM_CHECK(dc
))) {
3238 tcg_gen_movi_tl(t
, dc
->pc
& 0xffffffffULL
);
3240 tcg_gen_movi_tl(t
, dc
->pc
);
3242 gen_store_gpr(dc
, rd
, t
);
3245 case 0x6: /* V9 rdfprs */
3246 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
3247 gen_store_gpr(dc
, rd
, cpu_dst
);
3249 case 0xf: /* V9 membar */
3250 break; /* no effect */
3251 case 0x13: /* Graphics Status */
3252 if (gen_trap_ifnofpu(dc
)) {
3255 gen_store_gpr(dc
, rd
, cpu_gsr
);
3257 case 0x16: /* Softint */
3258 tcg_gen_ld32s_tl(cpu_dst
, cpu_env
,
3259 offsetof(CPUSPARCState
, softint
));
3260 gen_store_gpr(dc
, rd
, cpu_dst
);
3262 case 0x17: /* Tick compare */
3263 gen_store_gpr(dc
, rd
, cpu_tick_cmpr
);
3265 case 0x18: /* System tick */
3270 r_tickptr
= tcg_temp_new_ptr();
3271 r_const
= tcg_constant_i32(dc
->mem_idx
);
3272 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3273 offsetof(CPUSPARCState
, stick
));
3274 if (translator_io_start(&dc
->base
)) {
3275 dc
->base
.is_jmp
= DISAS_EXIT
;
3277 gen_helper_tick_get_count(cpu_dst
, cpu_env
, r_tickptr
,
3279 gen_store_gpr(dc
, rd
, cpu_dst
);
3282 case 0x19: /* System tick compare */
3283 gen_store_gpr(dc
, rd
, cpu_stick_cmpr
);
3285 case 0x1a: /* UltraSPARC-T1 Strand status */
3286 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3287 * this ASR as impl. dep
3289 CHECK_IU_FEATURE(dc
, HYPV
);
3291 TCGv t
= gen_dest_gpr(dc
, rd
);
3292 tcg_gen_movi_tl(t
, 1UL);
3293 gen_store_gpr(dc
, rd
, t
);
3296 case 0x10: /* Performance Control */
3297 case 0x11: /* Performance Instrumentation Counter */
3298 case 0x12: /* Dispatch Control */
3299 case 0x14: /* Softint set, WO */
3300 case 0x15: /* Softint clear, WO */
3305 #if !defined(CONFIG_USER_ONLY)
3306 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
3307 #ifndef TARGET_SPARC64
3308 if (!supervisor(dc
)) {
3312 gen_helper_rdpsr(cpu_dst
, cpu_env
);
3314 CHECK_IU_FEATURE(dc
, HYPV
);
3315 if (!hypervisor(dc
))
3317 rs1
= GET_FIELD(insn
, 13, 17);
3320 tcg_gen_ld_i64(cpu_dst
, cpu_env
,
3321 offsetof(CPUSPARCState
, hpstate
));
3324 // gen_op_rdhtstate();
3327 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
3330 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
3333 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
3335 case 31: // hstick_cmpr
3336 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
3342 gen_store_gpr(dc
, rd
, cpu_dst
);
3344 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
3345 if (!supervisor(dc
)) {
3348 cpu_tmp0
= tcg_temp_new();
3349 #ifdef TARGET_SPARC64
3350 rs1
= GET_FIELD(insn
, 13, 17);
3356 r_tsptr
= tcg_temp_new_ptr();
3357 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3358 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3359 offsetof(trap_state
, tpc
));
3366 r_tsptr
= tcg_temp_new_ptr();
3367 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3368 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3369 offsetof(trap_state
, tnpc
));
3376 r_tsptr
= tcg_temp_new_ptr();
3377 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3378 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3379 offsetof(trap_state
, tstate
));
3384 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3386 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3387 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
3388 offsetof(trap_state
, tt
));
3396 r_tickptr
= tcg_temp_new_ptr();
3397 r_const
= tcg_constant_i32(dc
->mem_idx
);
3398 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3399 offsetof(CPUSPARCState
, tick
));
3400 if (translator_io_start(&dc
->base
)) {
3401 dc
->base
.is_jmp
= DISAS_EXIT
;
3403 gen_helper_tick_get_count(cpu_tmp0
, cpu_env
,
3404 r_tickptr
, r_const
);
3408 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
3411 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3412 offsetof(CPUSPARCState
, pstate
));
3415 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3416 offsetof(CPUSPARCState
, tl
));
3419 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3420 offsetof(CPUSPARCState
, psrpil
));
3423 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
3426 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3427 offsetof(CPUSPARCState
, cansave
));
3429 case 11: // canrestore
3430 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3431 offsetof(CPUSPARCState
, canrestore
));
3433 case 12: // cleanwin
3434 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3435 offsetof(CPUSPARCState
, cleanwin
));
3437 case 13: // otherwin
3438 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3439 offsetof(CPUSPARCState
, otherwin
));
3442 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3443 offsetof(CPUSPARCState
, wstate
));
3445 case 16: // UA2005 gl
3446 CHECK_IU_FEATURE(dc
, GL
);
3447 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3448 offsetof(CPUSPARCState
, gl
));
3450 case 26: // UA2005 strand status
3451 CHECK_IU_FEATURE(dc
, HYPV
);
3452 if (!hypervisor(dc
))
3454 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
3457 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
3464 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
3466 gen_store_gpr(dc
, rd
, cpu_tmp0
);
3469 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3470 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
3471 #ifdef TARGET_SPARC64
3472 gen_helper_flushw(cpu_env
);
3474 if (!supervisor(dc
))
3476 gen_store_gpr(dc
, rd
, cpu_tbr
);
3480 } else if (xop
== 0x34) { /* FPU Operations */
3481 if (gen_trap_ifnofpu(dc
)) {
3484 gen_op_clear_ieee_excp_and_FTT();
3485 rs1
= GET_FIELD(insn
, 13, 17);
3486 rs2
= GET_FIELD(insn
, 27, 31);
3487 xop
= GET_FIELD(insn
, 18, 26);
3490 case 0x1: /* fmovs */
3491 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
3492 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
3494 case 0x5: /* fnegs */
3495 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
3497 case 0x9: /* fabss */
3498 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
3500 case 0x29: /* fsqrts */
3501 CHECK_FPU_FEATURE(dc
, FSQRT
);
3502 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
3504 case 0x2a: /* fsqrtd */
3505 CHECK_FPU_FEATURE(dc
, FSQRT
);
3506 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
3508 case 0x2b: /* fsqrtq */
3509 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3510 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
3512 case 0x41: /* fadds */
3513 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
3515 case 0x42: /* faddd */
3516 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
3518 case 0x43: /* faddq */
3519 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3520 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
3522 case 0x45: /* fsubs */
3523 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
3525 case 0x46: /* fsubd */
3526 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3528 case 0x47: /* fsubq */
3529 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3530 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3532 case 0x49: /* fmuls */
3533 CHECK_FPU_FEATURE(dc
, FMUL
);
3534 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3536 case 0x4a: /* fmuld */
3537 CHECK_FPU_FEATURE(dc
, FMUL
);
3538 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3540 case 0x4b: /* fmulq */
3541 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3542 CHECK_FPU_FEATURE(dc
, FMUL
);
3543 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3545 case 0x4d: /* fdivs */
3546 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3548 case 0x4e: /* fdivd */
3549 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3551 case 0x4f: /* fdivq */
3552 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3553 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3555 case 0x69: /* fsmuld */
3556 CHECK_FPU_FEATURE(dc
, FSMULD
);
3557 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3559 case 0x6e: /* fdmulq */
3560 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3561 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3563 case 0xc4: /* fitos */
3564 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3566 case 0xc6: /* fdtos */
3567 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3569 case 0xc7: /* fqtos */
3570 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3571 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3573 case 0xc8: /* fitod */
3574 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3576 case 0xc9: /* fstod */
3577 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3579 case 0xcb: /* fqtod */
3580 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3581 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3583 case 0xcc: /* fitoq */
3584 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3585 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3587 case 0xcd: /* fstoq */
3588 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3589 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3591 case 0xce: /* fdtoq */
3592 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3593 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3595 case 0xd1: /* fstoi */
3596 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3598 case 0xd2: /* fdtoi */
3599 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3601 case 0xd3: /* fqtoi */
3602 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3603 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3605 #ifdef TARGET_SPARC64
3606 case 0x2: /* V9 fmovd */
3607 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3608 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3610 case 0x3: /* V9 fmovq */
3611 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3612 gen_move_Q(dc
, rd
, rs2
);
3614 case 0x6: /* V9 fnegd */
3615 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3617 case 0x7: /* V9 fnegq */
3618 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3619 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3621 case 0xa: /* V9 fabsd */
3622 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3624 case 0xb: /* V9 fabsq */
3625 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3626 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3628 case 0x81: /* V9 fstox */
3629 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3631 case 0x82: /* V9 fdtox */
3632 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3634 case 0x83: /* V9 fqtox */
3635 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3636 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3638 case 0x84: /* V9 fxtos */
3639 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3641 case 0x88: /* V9 fxtod */
3642 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3644 case 0x8c: /* V9 fxtoq */
3645 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3646 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3652 } else if (xop
== 0x35) { /* FPU Operations */
3653 #ifdef TARGET_SPARC64
3656 if (gen_trap_ifnofpu(dc
)) {
3659 gen_op_clear_ieee_excp_and_FTT();
3660 rs1
= GET_FIELD(insn
, 13, 17);
3661 rs2
= GET_FIELD(insn
, 27, 31);
3662 xop
= GET_FIELD(insn
, 18, 26);
3664 #ifdef TARGET_SPARC64
3668 cond = GET_FIELD_SP(insn, 10, 12); \
3669 cpu_src1 = get_src1(dc, insn); \
3670 gen_compare_reg(&cmp, cond, cpu_src1); \
3671 gen_fmov##sz(dc, &cmp, rd, rs2); \
3674 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3677 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3680 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3681 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3688 #ifdef TARGET_SPARC64
3689 #define FMOVCC(fcc, sz) \
3692 cond = GET_FIELD_SP(insn, 14, 17); \
3693 gen_fcompare(&cmp, fcc, cond); \
3694 gen_fmov##sz(dc, &cmp, rd, rs2); \
3697 case 0x001: /* V9 fmovscc %fcc0 */
3700 case 0x002: /* V9 fmovdcc %fcc0 */
3703 case 0x003: /* V9 fmovqcc %fcc0 */
3704 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3707 case 0x041: /* V9 fmovscc %fcc1 */
3710 case 0x042: /* V9 fmovdcc %fcc1 */
3713 case 0x043: /* V9 fmovqcc %fcc1 */
3714 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3717 case 0x081: /* V9 fmovscc %fcc2 */
3720 case 0x082: /* V9 fmovdcc %fcc2 */
3723 case 0x083: /* V9 fmovqcc %fcc2 */
3724 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3727 case 0x0c1: /* V9 fmovscc %fcc3 */
3730 case 0x0c2: /* V9 fmovdcc %fcc3 */
3733 case 0x0c3: /* V9 fmovqcc %fcc3 */
3734 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3738 #define FMOVCC(xcc, sz) \
3741 cond = GET_FIELD_SP(insn, 14, 17); \
3742 gen_compare(&cmp, xcc, cond, dc); \
3743 gen_fmov##sz(dc, &cmp, rd, rs2); \
3746 case 0x101: /* V9 fmovscc %icc */
3749 case 0x102: /* V9 fmovdcc %icc */
3752 case 0x103: /* V9 fmovqcc %icc */
3753 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3756 case 0x181: /* V9 fmovscc %xcc */
3759 case 0x182: /* V9 fmovdcc %xcc */
3762 case 0x183: /* V9 fmovqcc %xcc */
3763 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3768 case 0x51: /* fcmps, V9 %fcc */
3769 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3770 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3771 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3773 case 0x52: /* fcmpd, V9 %fcc */
3774 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3775 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3776 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3778 case 0x53: /* fcmpq, V9 %fcc */
3779 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3780 gen_op_load_fpr_QT0(QFPREG(rs1
));
3781 gen_op_load_fpr_QT1(QFPREG(rs2
));
3782 gen_op_fcmpq(rd
& 3);
3784 case 0x55: /* fcmpes, V9 %fcc */
3785 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3786 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3787 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3789 case 0x56: /* fcmped, V9 %fcc */
3790 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3791 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3792 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3794 case 0x57: /* fcmpeq, V9 %fcc */
3795 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3796 gen_op_load_fpr_QT0(QFPREG(rs1
));
3797 gen_op_load_fpr_QT1(QFPREG(rs2
));
3798 gen_op_fcmpeq(rd
& 3);
3803 } else if (xop
== 0x2) {
3804 TCGv dst
= gen_dest_gpr(dc
, rd
);
3805 rs1
= GET_FIELD(insn
, 13, 17);
3807 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3808 if (IS_IMM
) { /* immediate */
3809 simm
= GET_FIELDs(insn
, 19, 31);
3810 tcg_gen_movi_tl(dst
, simm
);
3811 gen_store_gpr(dc
, rd
, dst
);
3812 } else { /* register */
3813 rs2
= GET_FIELD(insn
, 27, 31);
3815 tcg_gen_movi_tl(dst
, 0);
3816 gen_store_gpr(dc
, rd
, dst
);
3818 cpu_src2
= gen_load_gpr(dc
, rs2
);
3819 gen_store_gpr(dc
, rd
, cpu_src2
);
3823 cpu_src1
= get_src1(dc
, insn
);
3824 if (IS_IMM
) { /* immediate */
3825 simm
= GET_FIELDs(insn
, 19, 31);
3826 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
3827 gen_store_gpr(dc
, rd
, dst
);
3828 } else { /* register */
3829 rs2
= GET_FIELD(insn
, 27, 31);
3831 /* mov shortcut: or x, %g0, y -> mov x, y */
3832 gen_store_gpr(dc
, rd
, cpu_src1
);
3834 cpu_src2
= gen_load_gpr(dc
, rs2
);
3835 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
3836 gen_store_gpr(dc
, rd
, dst
);
3840 #ifdef TARGET_SPARC64
3841 } else if (xop
== 0x25) { /* sll, V9 sllx */
3842 cpu_src1
= get_src1(dc
, insn
);
3843 if (IS_IMM
) { /* immediate */
3844 simm
= GET_FIELDs(insn
, 20, 31);
3845 if (insn
& (1 << 12)) {
3846 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3848 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3850 } else { /* register */
3851 rs2
= GET_FIELD(insn
, 27, 31);
3852 cpu_src2
= gen_load_gpr(dc
, rs2
);
3853 cpu_tmp0
= tcg_temp_new();
3854 if (insn
& (1 << 12)) {
3855 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3857 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3859 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3861 gen_store_gpr(dc
, rd
, cpu_dst
);
3862 } else if (xop
== 0x26) { /* srl, V9 srlx */
3863 cpu_src1
= get_src1(dc
, insn
);
3864 if (IS_IMM
) { /* immediate */
3865 simm
= GET_FIELDs(insn
, 20, 31);
3866 if (insn
& (1 << 12)) {
3867 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3869 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3870 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3872 } else { /* register */
3873 rs2
= GET_FIELD(insn
, 27, 31);
3874 cpu_src2
= gen_load_gpr(dc
, rs2
);
3875 cpu_tmp0
= tcg_temp_new();
3876 if (insn
& (1 << 12)) {
3877 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3878 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3880 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3881 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3882 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3885 gen_store_gpr(dc
, rd
, cpu_dst
);
3886 } else if (xop
== 0x27) { /* sra, V9 srax */
3887 cpu_src1
= get_src1(dc
, insn
);
3888 if (IS_IMM
) { /* immediate */
3889 simm
= GET_FIELDs(insn
, 20, 31);
3890 if (insn
& (1 << 12)) {
3891 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3893 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3894 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3896 } else { /* register */
3897 rs2
= GET_FIELD(insn
, 27, 31);
3898 cpu_src2
= gen_load_gpr(dc
, rs2
);
3899 cpu_tmp0
= tcg_temp_new();
3900 if (insn
& (1 << 12)) {
3901 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3902 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3904 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3905 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3906 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3909 gen_store_gpr(dc
, rd
, cpu_dst
);
3911 } else if (xop
< 0x36) {
3913 cpu_src1
= get_src1(dc
, insn
);
3914 cpu_src2
= get_src2(dc
, insn
);
3915 switch (xop
& ~0x10) {
3918 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3919 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3920 dc
->cc_op
= CC_OP_ADD
;
3922 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3926 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3928 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3929 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3930 dc
->cc_op
= CC_OP_LOGIC
;
3934 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3936 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3937 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3938 dc
->cc_op
= CC_OP_LOGIC
;
3942 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3944 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3945 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3946 dc
->cc_op
= CC_OP_LOGIC
;
3951 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3952 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3953 dc
->cc_op
= CC_OP_SUB
;
3955 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3958 case 0x5: /* andn */
3959 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3961 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3962 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3963 dc
->cc_op
= CC_OP_LOGIC
;
3967 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3969 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3970 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3971 dc
->cc_op
= CC_OP_LOGIC
;
3974 case 0x7: /* xorn */
3975 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3977 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3978 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3979 dc
->cc_op
= CC_OP_LOGIC
;
3982 case 0x8: /* addx, V9 addc */
3983 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3986 #ifdef TARGET_SPARC64
3987 case 0x9: /* V9 mulx */
3988 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3991 case 0xa: /* umul */
3992 CHECK_IU_FEATURE(dc
, MUL
);
3993 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3995 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3996 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3997 dc
->cc_op
= CC_OP_LOGIC
;
4000 case 0xb: /* smul */
4001 CHECK_IU_FEATURE(dc
, MUL
);
4002 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
4004 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4005 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4006 dc
->cc_op
= CC_OP_LOGIC
;
4009 case 0xc: /* subx, V9 subc */
4010 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
4013 #ifdef TARGET_SPARC64
4014 case 0xd: /* V9 udivx */
4015 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
4018 case 0xe: /* udiv */
4019 CHECK_IU_FEATURE(dc
, DIV
);
4021 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
4023 dc
->cc_op
= CC_OP_DIV
;
4025 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
4029 case 0xf: /* sdiv */
4030 CHECK_IU_FEATURE(dc
, DIV
);
4032 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
4034 dc
->cc_op
= CC_OP_DIV
;
4036 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
4043 gen_store_gpr(dc
, rd
, cpu_dst
);
4045 cpu_src1
= get_src1(dc
, insn
);
4046 cpu_src2
= get_src2(dc
, insn
);
4048 case 0x20: /* taddcc */
4049 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4050 gen_store_gpr(dc
, rd
, cpu_dst
);
4051 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
4052 dc
->cc_op
= CC_OP_TADD
;
4054 case 0x21: /* tsubcc */
4055 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4056 gen_store_gpr(dc
, rd
, cpu_dst
);
4057 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
4058 dc
->cc_op
= CC_OP_TSUB
;
4060 case 0x22: /* taddcctv */
4061 gen_helper_taddcctv(cpu_dst
, cpu_env
,
4062 cpu_src1
, cpu_src2
);
4063 gen_store_gpr(dc
, rd
, cpu_dst
);
4064 dc
->cc_op
= CC_OP_TADDTV
;
4066 case 0x23: /* tsubcctv */
4067 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
4068 cpu_src1
, cpu_src2
);
4069 gen_store_gpr(dc
, rd
, cpu_dst
);
4070 dc
->cc_op
= CC_OP_TSUBTV
;
4072 case 0x24: /* mulscc */
4074 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
4075 gen_store_gpr(dc
, rd
, cpu_dst
);
4076 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
4077 dc
->cc_op
= CC_OP_ADD
;
4079 #ifndef TARGET_SPARC64
4080 case 0x25: /* sll */
4081 if (IS_IMM
) { /* immediate */
4082 simm
= GET_FIELDs(insn
, 20, 31);
4083 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4084 } else { /* register */
4085 cpu_tmp0
= tcg_temp_new();
4086 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4087 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4089 gen_store_gpr(dc
, rd
, cpu_dst
);
4091 case 0x26: /* srl */
4092 if (IS_IMM
) { /* immediate */
4093 simm
= GET_FIELDs(insn
, 20, 31);
4094 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4095 } else { /* register */
4096 cpu_tmp0
= tcg_temp_new();
4097 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4098 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4100 gen_store_gpr(dc
, rd
, cpu_dst
);
4102 case 0x27: /* sra */
4103 if (IS_IMM
) { /* immediate */
4104 simm
= GET_FIELDs(insn
, 20, 31);
4105 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4106 } else { /* register */
4107 cpu_tmp0
= tcg_temp_new();
4108 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4109 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4111 gen_store_gpr(dc
, rd
, cpu_dst
);
4116 cpu_tmp0
= tcg_temp_new();
4119 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4120 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
4122 #ifndef TARGET_SPARC64
4123 case 0x01 ... 0x0f: /* undefined in the
4127 case 0x10 ... 0x1f: /* implementation-dependent
4131 if ((rd
== 0x13) && (dc
->def
->features
&
4132 CPU_FEATURE_POWERDOWN
)) {
4133 /* LEON3 power-down */
4135 gen_helper_power_down(cpu_env
);
4139 case 0x2: /* V9 wrccr */
4140 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4141 gen_helper_wrccr(cpu_env
, cpu_tmp0
);
4142 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4143 dc
->cc_op
= CC_OP_FLAGS
;
4145 case 0x3: /* V9 wrasi */
4146 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4147 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
4148 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4149 offsetof(CPUSPARCState
, asi
));
4151 * End TB to notice changed ASI.
4152 * TODO: Could notice src1 = %g0 and IS_IMM,
4153 * update DisasContext and not exit the TB.
4157 tcg_gen_lookup_and_goto_ptr();
4158 dc
->base
.is_jmp
= DISAS_NORETURN
;
4160 case 0x6: /* V9 wrfprs */
4161 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4162 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
4166 tcg_gen_exit_tb(NULL
, 0);
4167 dc
->base
.is_jmp
= DISAS_NORETURN
;
4169 case 0xf: /* V9 sir, nop if user */
4170 #if !defined(CONFIG_USER_ONLY)
4171 if (supervisor(dc
)) {
4176 case 0x13: /* Graphics Status */
4177 if (gen_trap_ifnofpu(dc
)) {
4180 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
4182 case 0x14: /* Softint set */
4183 if (!supervisor(dc
))
4185 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4186 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
4188 case 0x15: /* Softint clear */
4189 if (!supervisor(dc
))
4191 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4192 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
4194 case 0x16: /* Softint write */
4195 if (!supervisor(dc
))
4197 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4198 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
4200 case 0x17: /* Tick compare */
4201 #if !defined(CONFIG_USER_ONLY)
4202 if (!supervisor(dc
))
4208 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
4210 r_tickptr
= tcg_temp_new_ptr();
4211 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4212 offsetof(CPUSPARCState
, tick
));
4213 translator_io_start(&dc
->base
);
4214 gen_helper_tick_set_limit(r_tickptr
,
4216 /* End TB to handle timer interrupt */
4217 dc
->base
.is_jmp
= DISAS_EXIT
;
4220 case 0x18: /* System tick */
4221 #if !defined(CONFIG_USER_ONLY)
4222 if (!supervisor(dc
))
4228 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
4230 r_tickptr
= tcg_temp_new_ptr();
4231 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4232 offsetof(CPUSPARCState
, stick
));
4233 translator_io_start(&dc
->base
);
4234 gen_helper_tick_set_count(r_tickptr
,
4236 /* End TB to handle timer interrupt */
4237 dc
->base
.is_jmp
= DISAS_EXIT
;
4240 case 0x19: /* System tick compare */
4241 #if !defined(CONFIG_USER_ONLY)
4242 if (!supervisor(dc
))
4248 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
4250 r_tickptr
= tcg_temp_new_ptr();
4251 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4252 offsetof(CPUSPARCState
, stick
));
4253 translator_io_start(&dc
->base
);
4254 gen_helper_tick_set_limit(r_tickptr
,
4256 /* End TB to handle timer interrupt */
4257 dc
->base
.is_jmp
= DISAS_EXIT
;
4261 case 0x10: /* Performance Control */
4262 case 0x11: /* Performance Instrumentation
4264 case 0x12: /* Dispatch Control */
4271 #if !defined(CONFIG_USER_ONLY)
4272 case 0x31: /* wrpsr, V9 saved, restored */
4274 if (!supervisor(dc
))
4276 #ifdef TARGET_SPARC64
4279 gen_helper_saved(cpu_env
);
4282 gen_helper_restored(cpu_env
);
4284 case 2: /* UA2005 allclean */
4285 case 3: /* UA2005 otherw */
4286 case 4: /* UA2005 normalw */
4287 case 5: /* UA2005 invalw */
4293 cpu_tmp0
= tcg_temp_new();
4294 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4295 gen_helper_wrpsr(cpu_env
, cpu_tmp0
);
4296 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4297 dc
->cc_op
= CC_OP_FLAGS
;
4300 tcg_gen_exit_tb(NULL
, 0);
4301 dc
->base
.is_jmp
= DISAS_NORETURN
;
4305 case 0x32: /* wrwim, V9 wrpr */
4307 if (!supervisor(dc
))
4309 cpu_tmp0
= tcg_temp_new();
4310 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4311 #ifdef TARGET_SPARC64
4317 r_tsptr
= tcg_temp_new_ptr();
4318 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4319 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4320 offsetof(trap_state
, tpc
));
4327 r_tsptr
= tcg_temp_new_ptr();
4328 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4329 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4330 offsetof(trap_state
, tnpc
));
4337 r_tsptr
= tcg_temp_new_ptr();
4338 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4339 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4340 offsetof(trap_state
,
4348 r_tsptr
= tcg_temp_new_ptr();
4349 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4350 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
4351 offsetof(trap_state
, tt
));
4358 r_tickptr
= tcg_temp_new_ptr();
4359 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4360 offsetof(CPUSPARCState
, tick
));
4361 translator_io_start(&dc
->base
);
4362 gen_helper_tick_set_count(r_tickptr
,
4364 /* End TB to handle timer interrupt */
4365 dc
->base
.is_jmp
= DISAS_EXIT
;
4369 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
4373 if (translator_io_start(&dc
->base
)) {
4374 dc
->base
.is_jmp
= DISAS_EXIT
;
4376 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
4377 dc
->npc
= DYNAMIC_PC
;
4381 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4382 offsetof(CPUSPARCState
, tl
));
4383 dc
->npc
= DYNAMIC_PC
;
4386 if (translator_io_start(&dc
->base
)) {
4387 dc
->base
.is_jmp
= DISAS_EXIT
;
4389 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
4392 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
4395 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4396 offsetof(CPUSPARCState
,
4399 case 11: // canrestore
4400 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4401 offsetof(CPUSPARCState
,
4404 case 12: // cleanwin
4405 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4406 offsetof(CPUSPARCState
,
4409 case 13: // otherwin
4410 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4411 offsetof(CPUSPARCState
,
4415 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4416 offsetof(CPUSPARCState
,
4419 case 16: // UA2005 gl
4420 CHECK_IU_FEATURE(dc
, GL
);
4421 gen_helper_wrgl(cpu_env
, cpu_tmp0
);
4423 case 26: // UA2005 strand status
4424 CHECK_IU_FEATURE(dc
, HYPV
);
4425 if (!hypervisor(dc
))
4427 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
4433 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
4434 if (dc
->def
->nwindows
!= 32) {
4435 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
4436 (1 << dc
->def
->nwindows
) - 1);
4441 case 0x33: /* wrtbr, UA2005 wrhpr */
4443 #ifndef TARGET_SPARC64
4444 if (!supervisor(dc
))
4446 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
4448 CHECK_IU_FEATURE(dc
, HYPV
);
4449 if (!hypervisor(dc
))
4451 cpu_tmp0
= tcg_temp_new();
4452 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4455 tcg_gen_st_i64(cpu_tmp0
, cpu_env
,
4456 offsetof(CPUSPARCState
,
4460 tcg_gen_exit_tb(NULL
, 0);
4461 dc
->base
.is_jmp
= DISAS_NORETURN
;
4464 // XXX gen_op_wrhtstate();
4467 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
4470 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
4472 case 31: // hstick_cmpr
4476 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
4477 r_tickptr
= tcg_temp_new_ptr();
4478 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4479 offsetof(CPUSPARCState
, hstick
));
4480 translator_io_start(&dc
->base
);
4481 gen_helper_tick_set_limit(r_tickptr
,
4483 /* End TB to handle timer interrupt */
4484 dc
->base
.is_jmp
= DISAS_EXIT
;
4487 case 6: // hver readonly
4495 #ifdef TARGET_SPARC64
4496 case 0x2c: /* V9 movcc */
4498 int cc
= GET_FIELD_SP(insn
, 11, 12);
4499 int cond
= GET_FIELD_SP(insn
, 14, 17);
4503 if (insn
& (1 << 18)) {
4505 gen_compare(&cmp
, 0, cond
, dc
);
4506 } else if (cc
== 2) {
4507 gen_compare(&cmp
, 1, cond
, dc
);
4512 gen_fcompare(&cmp
, cc
, cond
);
4515 /* The get_src2 above loaded the normal 13-bit
4516 immediate field, not the 11-bit field we have
4517 in movcc. But it did handle the reg case. */
4519 simm
= GET_FIELD_SPs(insn
, 0, 10);
4520 tcg_gen_movi_tl(cpu_src2
, simm
);
4523 dst
= gen_load_gpr(dc
, rd
);
4524 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4527 gen_store_gpr(dc
, rd
, dst
);
4530 case 0x2d: /* V9 sdivx */
4531 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
4532 gen_store_gpr(dc
, rd
, cpu_dst
);
4534 case 0x2e: /* V9 popc */
4535 tcg_gen_ctpop_tl(cpu_dst
, cpu_src2
);
4536 gen_store_gpr(dc
, rd
, cpu_dst
);
4538 case 0x2f: /* V9 movr */
4540 int cond
= GET_FIELD_SP(insn
, 10, 12);
4544 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4546 /* The get_src2 above loaded the normal 13-bit
4547 immediate field, not the 10-bit field we have
4548 in movr. But it did handle the reg case. */
4550 simm
= GET_FIELD_SPs(insn
, 0, 9);
4551 tcg_gen_movi_tl(cpu_src2
, simm
);
4554 dst
= gen_load_gpr(dc
, rd
);
4555 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4558 gen_store_gpr(dc
, rd
, dst
);
4566 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4567 #ifdef TARGET_SPARC64
4568 int opf
= GET_FIELD_SP(insn
, 5, 13);
4569 rs1
= GET_FIELD(insn
, 13, 17);
4570 rs2
= GET_FIELD(insn
, 27, 31);
4571 if (gen_trap_ifnofpu(dc
)) {
4576 case 0x000: /* VIS I edge8cc */
4577 CHECK_FPU_FEATURE(dc
, VIS1
);
4578 cpu_src1
= gen_load_gpr(dc
, rs1
);
4579 cpu_src2
= gen_load_gpr(dc
, rs2
);
4580 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4581 gen_store_gpr(dc
, rd
, cpu_dst
);
4583 case 0x001: /* VIS II edge8n */
4584 CHECK_FPU_FEATURE(dc
, VIS2
);
4585 cpu_src1
= gen_load_gpr(dc
, rs1
);
4586 cpu_src2
= gen_load_gpr(dc
, rs2
);
4587 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4588 gen_store_gpr(dc
, rd
, cpu_dst
);
4590 case 0x002: /* VIS I edge8lcc */
4591 CHECK_FPU_FEATURE(dc
, VIS1
);
4592 cpu_src1
= gen_load_gpr(dc
, rs1
);
4593 cpu_src2
= gen_load_gpr(dc
, rs2
);
4594 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4595 gen_store_gpr(dc
, rd
, cpu_dst
);
4597 case 0x003: /* VIS II edge8ln */
4598 CHECK_FPU_FEATURE(dc
, VIS2
);
4599 cpu_src1
= gen_load_gpr(dc
, rs1
);
4600 cpu_src2
= gen_load_gpr(dc
, rs2
);
4601 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4602 gen_store_gpr(dc
, rd
, cpu_dst
);
4604 case 0x004: /* VIS I edge16cc */
4605 CHECK_FPU_FEATURE(dc
, VIS1
);
4606 cpu_src1
= gen_load_gpr(dc
, rs1
);
4607 cpu_src2
= gen_load_gpr(dc
, rs2
);
4608 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4609 gen_store_gpr(dc
, rd
, cpu_dst
);
4611 case 0x005: /* VIS II edge16n */
4612 CHECK_FPU_FEATURE(dc
, VIS2
);
4613 cpu_src1
= gen_load_gpr(dc
, rs1
);
4614 cpu_src2
= gen_load_gpr(dc
, rs2
);
4615 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4616 gen_store_gpr(dc
, rd
, cpu_dst
);
4618 case 0x006: /* VIS I edge16lcc */
4619 CHECK_FPU_FEATURE(dc
, VIS1
);
4620 cpu_src1
= gen_load_gpr(dc
, rs1
);
4621 cpu_src2
= gen_load_gpr(dc
, rs2
);
4622 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4623 gen_store_gpr(dc
, rd
, cpu_dst
);
4625 case 0x007: /* VIS II edge16ln */
4626 CHECK_FPU_FEATURE(dc
, VIS2
);
4627 cpu_src1
= gen_load_gpr(dc
, rs1
);
4628 cpu_src2
= gen_load_gpr(dc
, rs2
);
4629 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4630 gen_store_gpr(dc
, rd
, cpu_dst
);
4632 case 0x008: /* VIS I edge32cc */
4633 CHECK_FPU_FEATURE(dc
, VIS1
);
4634 cpu_src1
= gen_load_gpr(dc
, rs1
);
4635 cpu_src2
= gen_load_gpr(dc
, rs2
);
4636 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4637 gen_store_gpr(dc
, rd
, cpu_dst
);
4639 case 0x009: /* VIS II edge32n */
4640 CHECK_FPU_FEATURE(dc
, VIS2
);
4641 cpu_src1
= gen_load_gpr(dc
, rs1
);
4642 cpu_src2
= gen_load_gpr(dc
, rs2
);
4643 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4644 gen_store_gpr(dc
, rd
, cpu_dst
);
4646 case 0x00a: /* VIS I edge32lcc */
4647 CHECK_FPU_FEATURE(dc
, VIS1
);
4648 cpu_src1
= gen_load_gpr(dc
, rs1
);
4649 cpu_src2
= gen_load_gpr(dc
, rs2
);
4650 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4651 gen_store_gpr(dc
, rd
, cpu_dst
);
4653 case 0x00b: /* VIS II edge32ln */
4654 CHECK_FPU_FEATURE(dc
, VIS2
);
4655 cpu_src1
= gen_load_gpr(dc
, rs1
);
4656 cpu_src2
= gen_load_gpr(dc
, rs2
);
4657 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4658 gen_store_gpr(dc
, rd
, cpu_dst
);
4660 case 0x010: /* VIS I array8 */
4661 CHECK_FPU_FEATURE(dc
, VIS1
);
4662 cpu_src1
= gen_load_gpr(dc
, rs1
);
4663 cpu_src2
= gen_load_gpr(dc
, rs2
);
4664 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4665 gen_store_gpr(dc
, rd
, cpu_dst
);
4667 case 0x012: /* VIS I array16 */
4668 CHECK_FPU_FEATURE(dc
, VIS1
);
4669 cpu_src1
= gen_load_gpr(dc
, rs1
);
4670 cpu_src2
= gen_load_gpr(dc
, rs2
);
4671 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4672 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4673 gen_store_gpr(dc
, rd
, cpu_dst
);
4675 case 0x014: /* VIS I array32 */
4676 CHECK_FPU_FEATURE(dc
, VIS1
);
4677 cpu_src1
= gen_load_gpr(dc
, rs1
);
4678 cpu_src2
= gen_load_gpr(dc
, rs2
);
4679 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4680 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4681 gen_store_gpr(dc
, rd
, cpu_dst
);
4683 case 0x018: /* VIS I alignaddr */
4684 CHECK_FPU_FEATURE(dc
, VIS1
);
4685 cpu_src1
= gen_load_gpr(dc
, rs1
);
4686 cpu_src2
= gen_load_gpr(dc
, rs2
);
4687 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4688 gen_store_gpr(dc
, rd
, cpu_dst
);
4690 case 0x01a: /* VIS I alignaddrl */
4691 CHECK_FPU_FEATURE(dc
, VIS1
);
4692 cpu_src1
= gen_load_gpr(dc
, rs1
);
4693 cpu_src2
= gen_load_gpr(dc
, rs2
);
4694 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4695 gen_store_gpr(dc
, rd
, cpu_dst
);
4697 case 0x019: /* VIS II bmask */
4698 CHECK_FPU_FEATURE(dc
, VIS2
);
4699 cpu_src1
= gen_load_gpr(dc
, rs1
);
4700 cpu_src2
= gen_load_gpr(dc
, rs2
);
4701 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4702 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4703 gen_store_gpr(dc
, rd
, cpu_dst
);
4705 case 0x020: /* VIS I fcmple16 */
4706 CHECK_FPU_FEATURE(dc
, VIS1
);
4707 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4708 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4709 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4710 gen_store_gpr(dc
, rd
, cpu_dst
);
4712 case 0x022: /* VIS I fcmpne16 */
4713 CHECK_FPU_FEATURE(dc
, VIS1
);
4714 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4715 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4716 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4717 gen_store_gpr(dc
, rd
, cpu_dst
);
4719 case 0x024: /* VIS I fcmple32 */
4720 CHECK_FPU_FEATURE(dc
, VIS1
);
4721 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4722 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4723 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4724 gen_store_gpr(dc
, rd
, cpu_dst
);
4726 case 0x026: /* VIS I fcmpne32 */
4727 CHECK_FPU_FEATURE(dc
, VIS1
);
4728 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4729 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4730 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4731 gen_store_gpr(dc
, rd
, cpu_dst
);
4733 case 0x028: /* VIS I fcmpgt16 */
4734 CHECK_FPU_FEATURE(dc
, VIS1
);
4735 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4736 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4737 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4738 gen_store_gpr(dc
, rd
, cpu_dst
);
4740 case 0x02a: /* VIS I fcmpeq16 */
4741 CHECK_FPU_FEATURE(dc
, VIS1
);
4742 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4743 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4744 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4745 gen_store_gpr(dc
, rd
, cpu_dst
);
4747 case 0x02c: /* VIS I fcmpgt32 */
4748 CHECK_FPU_FEATURE(dc
, VIS1
);
4749 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4750 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4751 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4752 gen_store_gpr(dc
, rd
, cpu_dst
);
4754 case 0x02e: /* VIS I fcmpeq32 */
4755 CHECK_FPU_FEATURE(dc
, VIS1
);
4756 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4757 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4758 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4759 gen_store_gpr(dc
, rd
, cpu_dst
);
4761 case 0x031: /* VIS I fmul8x16 */
4762 CHECK_FPU_FEATURE(dc
, VIS1
);
4763 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4765 case 0x033: /* VIS I fmul8x16au */
4766 CHECK_FPU_FEATURE(dc
, VIS1
);
4767 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4769 case 0x035: /* VIS I fmul8x16al */
4770 CHECK_FPU_FEATURE(dc
, VIS1
);
4771 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4773 case 0x036: /* VIS I fmul8sux16 */
4774 CHECK_FPU_FEATURE(dc
, VIS1
);
4775 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4777 case 0x037: /* VIS I fmul8ulx16 */
4778 CHECK_FPU_FEATURE(dc
, VIS1
);
4779 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4781 case 0x038: /* VIS I fmuld8sux16 */
4782 CHECK_FPU_FEATURE(dc
, VIS1
);
4783 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4785 case 0x039: /* VIS I fmuld8ulx16 */
4786 CHECK_FPU_FEATURE(dc
, VIS1
);
4787 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4789 case 0x03a: /* VIS I fpack32 */
4790 CHECK_FPU_FEATURE(dc
, VIS1
);
4791 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4793 case 0x03b: /* VIS I fpack16 */
4794 CHECK_FPU_FEATURE(dc
, VIS1
);
4795 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4796 cpu_dst_32
= gen_dest_fpr_F(dc
);
4797 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4798 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4800 case 0x03d: /* VIS I fpackfix */
4801 CHECK_FPU_FEATURE(dc
, VIS1
);
4802 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4803 cpu_dst_32
= gen_dest_fpr_F(dc
);
4804 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4805 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4807 case 0x03e: /* VIS I pdist */
4808 CHECK_FPU_FEATURE(dc
, VIS1
);
4809 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4811 case 0x048: /* VIS I faligndata */
4812 CHECK_FPU_FEATURE(dc
, VIS1
);
4813 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4815 case 0x04b: /* VIS I fpmerge */
4816 CHECK_FPU_FEATURE(dc
, VIS1
);
4817 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4819 case 0x04c: /* VIS II bshuffle */
4820 CHECK_FPU_FEATURE(dc
, VIS2
);
4821 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4823 case 0x04d: /* VIS I fexpand */
4824 CHECK_FPU_FEATURE(dc
, VIS1
);
4825 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4827 case 0x050: /* VIS I fpadd16 */
4828 CHECK_FPU_FEATURE(dc
, VIS1
);
4829 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4831 case 0x051: /* VIS I fpadd16s */
4832 CHECK_FPU_FEATURE(dc
, VIS1
);
4833 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4835 case 0x052: /* VIS I fpadd32 */
4836 CHECK_FPU_FEATURE(dc
, VIS1
);
4837 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4839 case 0x053: /* VIS I fpadd32s */
4840 CHECK_FPU_FEATURE(dc
, VIS1
);
4841 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4843 case 0x054: /* VIS I fpsub16 */
4844 CHECK_FPU_FEATURE(dc
, VIS1
);
4845 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4847 case 0x055: /* VIS I fpsub16s */
4848 CHECK_FPU_FEATURE(dc
, VIS1
);
4849 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4851 case 0x056: /* VIS I fpsub32 */
4852 CHECK_FPU_FEATURE(dc
, VIS1
);
4853 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4855 case 0x057: /* VIS I fpsub32s */
4856 CHECK_FPU_FEATURE(dc
, VIS1
);
4857 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4859 case 0x060: /* VIS I fzero */
4860 CHECK_FPU_FEATURE(dc
, VIS1
);
4861 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4862 tcg_gen_movi_i64(cpu_dst_64
, 0);
4863 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4865 case 0x061: /* VIS I fzeros */
4866 CHECK_FPU_FEATURE(dc
, VIS1
);
4867 cpu_dst_32
= gen_dest_fpr_F(dc
);
4868 tcg_gen_movi_i32(cpu_dst_32
, 0);
4869 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4871 case 0x062: /* VIS I fnor */
4872 CHECK_FPU_FEATURE(dc
, VIS1
);
4873 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4875 case 0x063: /* VIS I fnors */
4876 CHECK_FPU_FEATURE(dc
, VIS1
);
4877 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4879 case 0x064: /* VIS I fandnot2 */
4880 CHECK_FPU_FEATURE(dc
, VIS1
);
4881 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4883 case 0x065: /* VIS I fandnot2s */
4884 CHECK_FPU_FEATURE(dc
, VIS1
);
4885 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4887 case 0x066: /* VIS I fnot2 */
4888 CHECK_FPU_FEATURE(dc
, VIS1
);
4889 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4891 case 0x067: /* VIS I fnot2s */
4892 CHECK_FPU_FEATURE(dc
, VIS1
);
4893 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4895 case 0x068: /* VIS I fandnot1 */
4896 CHECK_FPU_FEATURE(dc
, VIS1
);
4897 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4899 case 0x069: /* VIS I fandnot1s */
4900 CHECK_FPU_FEATURE(dc
, VIS1
);
4901 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4903 case 0x06a: /* VIS I fnot1 */
4904 CHECK_FPU_FEATURE(dc
, VIS1
);
4905 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4907 case 0x06b: /* VIS I fnot1s */
4908 CHECK_FPU_FEATURE(dc
, VIS1
);
4909 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4911 case 0x06c: /* VIS I fxor */
4912 CHECK_FPU_FEATURE(dc
, VIS1
);
4913 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4915 case 0x06d: /* VIS I fxors */
4916 CHECK_FPU_FEATURE(dc
, VIS1
);
4917 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4919 case 0x06e: /* VIS I fnand */
4920 CHECK_FPU_FEATURE(dc
, VIS1
);
4921 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4923 case 0x06f: /* VIS I fnands */
4924 CHECK_FPU_FEATURE(dc
, VIS1
);
4925 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4927 case 0x070: /* VIS I fand */
4928 CHECK_FPU_FEATURE(dc
, VIS1
);
4929 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4931 case 0x071: /* VIS I fands */
4932 CHECK_FPU_FEATURE(dc
, VIS1
);
4933 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4935 case 0x072: /* VIS I fxnor */
4936 CHECK_FPU_FEATURE(dc
, VIS1
);
4937 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4939 case 0x073: /* VIS I fxnors */
4940 CHECK_FPU_FEATURE(dc
, VIS1
);
4941 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4943 case 0x074: /* VIS I fsrc1 */
4944 CHECK_FPU_FEATURE(dc
, VIS1
);
4945 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4946 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4948 case 0x075: /* VIS I fsrc1s */
4949 CHECK_FPU_FEATURE(dc
, VIS1
);
4950 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4951 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4953 case 0x076: /* VIS I fornot2 */
4954 CHECK_FPU_FEATURE(dc
, VIS1
);
4955 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4957 case 0x077: /* VIS I fornot2s */
4958 CHECK_FPU_FEATURE(dc
, VIS1
);
4959 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4961 case 0x078: /* VIS I fsrc2 */
4962 CHECK_FPU_FEATURE(dc
, VIS1
);
4963 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4964 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4966 case 0x079: /* VIS I fsrc2s */
4967 CHECK_FPU_FEATURE(dc
, VIS1
);
4968 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4969 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4971 case 0x07a: /* VIS I fornot1 */
4972 CHECK_FPU_FEATURE(dc
, VIS1
);
4973 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4975 case 0x07b: /* VIS I fornot1s */
4976 CHECK_FPU_FEATURE(dc
, VIS1
);
4977 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4979 case 0x07c: /* VIS I for */
4980 CHECK_FPU_FEATURE(dc
, VIS1
);
4981 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4983 case 0x07d: /* VIS I fors */
4984 CHECK_FPU_FEATURE(dc
, VIS1
);
4985 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4987 case 0x07e: /* VIS I fone */
4988 CHECK_FPU_FEATURE(dc
, VIS1
);
4989 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4990 tcg_gen_movi_i64(cpu_dst_64
, -1);
4991 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4993 case 0x07f: /* VIS I fones */
4994 CHECK_FPU_FEATURE(dc
, VIS1
);
4995 cpu_dst_32
= gen_dest_fpr_F(dc
);
4996 tcg_gen_movi_i32(cpu_dst_32
, -1);
4997 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4999 case 0x080: /* VIS I shutdown */
5000 case 0x081: /* VIS II siam */
5009 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
5010 #ifdef TARGET_SPARC64
5015 #ifdef TARGET_SPARC64
5016 } else if (xop
== 0x39) { /* V9 return */
            save_state(dc);
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = tcg_temp_new();
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            gen_helper_restore(cpu_env);
            gen_mov_pc_npc(dc);
            gen_check_align(cpu_tmp0, 3);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC_LOOKUP;
            goto jmp_insn;
#endif
        } else {
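            /*
             * The remaining xop values (jmpl, rett, flush, save, restore,
             * done/retry) share this effective-address computation:
             * cpu_tmp0 = rs1 + (simm13 or rs2).  jmpl and rett use it as a
             * jump target; save and restore return it in rd.
             */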
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = tcg_temp_new();
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            switch (xop) {
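            /*
             * JMPL writes the address of the jmpl itself (dc->pc) into rd
             * and branches to the computed target.
             */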
            case 0x38:  /* jmpl */
                {
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, dc->pc);
                    gen_store_gpr(dc, rd, t);

                    gen_mov_pc_npc(dc);
                    gen_check_align(cpu_tmp0, 3);
                    gen_address_mask(dc, cpu_tmp0);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC_LOOKUP;
                }
                goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:  /* rett, V9 return */
                {
                    if (!supervisor(dc))
                        goto priv_insn;
                    gen_mov_pc_npc(dc);
                    gen_check_align(cpu_tmp0, 3);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    gen_helper_rett(cpu_env);
                }
                goto jmp_insn;
#endif
            case 0x3b: /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                    goto unimp_flush;
                /* nop */
                break;
            case 0x3c:      /* save */
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d:      /* restore */
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:      /* V9 done/retry */
                {
                    switch (rd) {
                    case 0:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        translator_io_start(&dc->base);
                        gen_helper_done(cpu_env);
                        break;
                    case 1:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        translator_io_start(&dc->base);
                        gen_helper_retry(cpu_env);
                        break;
                    default:
                        goto illegal_insn;
                    }
                }
                break;
#endif
            default:
                goto illegal_insn;
            }
        }
        break;
    }
    break;
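    /*
     * Format 3 memory instructions: the xop field (insn bits 24:19)
     * selects the operation; the effective address is either
     * rs1 + sign-extended simm13 or rs1 + rs2.
     */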
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = tcg_temp_new();

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);
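                /*
                 * Integer loads: apply the 32-bit address mask when it is
                 * in effect (gen_address_mask), then emit a guest load
                 * whose MemOp encodes size, signedness and alignment.
                 */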

                switch (xop) {
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_UB);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUW | MO_ALIGN);
                    break;
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TESW | MO_ALIGN);
                    break;
                case 0xd:       /* ldstub */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
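                /*
                 * Alternate-space variants: the ASI is taken from the
                 * instruction's immediate field or from %asi and is
                 * resolved inside gen_ld_asi() and friends.
                 */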
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13:      /* ldda, load double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomically */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory. Also
                                   atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TESL | MO_ALIGN);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
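                /*
                 * Floating-point loads: ldf/lddf/ldqf fill the FP register
                 * file; ldfsr/ldxfsr go through helpers so that the FSR
                 * and FP exception state stay consistent.
                 */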
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                        break;
                    }
#endif
                    cpu_dst_32 = tcg_temp_new_i32();
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);
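                /*
                 * Integer stores mirror the loads above: the value comes
                 * from rd (plus rd + 1 for std) and is written with a
                 * matching MemOp.
                 */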

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUW | MO_ALIGN);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
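                /*
                 * Floating-point stores; stfsr/stxfsr write the FSR back
                 * from the cpu_fsr global.
                 */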
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                               dc->mem_idx,
                                               MO_TEUQ | MO_ALIGN);
                            break;
                        }
#endif
                        tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                           dc->mem_idx, MO_TEUL | MO_ALIGN);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write.  */
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
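    /*
     * Instructions that did not branch fall through to here.  When npc
     * carries a dynamic tag (DYNAMIC_PC, DYNAMIC_PC_LOOKUP or JUMP_PC),
     * code must be emitted to compute the next pc at run time; otherwise
     * both pc and npc advance statically at translation time.
     */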
    /* default case for non jump instructions */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    return;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    return;
 unimp_flush:
    gen_exception(dc, TT_UNIMP_FLUSH);
    return;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    return;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    return;
#endif
}
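
/*
 * Translator plumbing: the TranslatorOps callbacks below let the generic
 * translator_loop() drive disas_sparc_insn() one instruction at a time.
 */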
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
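
/* Fetch one instruction word, translate it, and decide whether to stop. */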
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;
    disas_sparc_insn(dc, insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        save_npc(dc);
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}

static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
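
/*
 * Create the TCG globals backing the SPARC CPU state.  %g0 never gets a
 * backing global (the register loop below starts at 1), so it is
 * special-cased by the register load/store helpers.
 */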
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }

    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
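
/*
 * Restore pc/npc from the values recorded at insn_start when an exception
 * unwinds into the middle of a TB.  npc may carry the same dynamic tags
 * used during translation, including the JUMP_PC encoding, which is
 * resolved using the runtime value of 'cond'.
 */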
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;