/*
   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
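/* Worked example (added for illustration; not part of the original
   translator): GET_FIELD counts bit 0 as the MSB of the 32-bit word,
   so the rd field -- bits 29..25 in the manual's numbering -- is
   GET_FIELD(insn, 2, 6) == (insn >> 25) & 0x1f.  For the encoding
   0x8a102003 ("mov 3, %g5") this yields (0x8a102003 >> 25) & 0x1f = 5.
   GET_FIELD_SP uses the manual's numbering directly, so
   GET_FIELD_SP(insn, 25, 29) extracts the same field. */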
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
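/* Worked example (added; not in the original): SPARC V9 packs the even
   double-register numbers %f0..%f62 into 5 bits by folding bit 5 into
   bit 0, and DFPREG() undoes that folding: DFPREG(1) = (1 << 5) | 0 =
   32, so an odd encoded value selects the upper bank (%f32 here).  On
   32-bit SPARC only the lower bank exists and the low bit is simply
   masked off. */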
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
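/* Examples (added; not in the original): sign_extend() widens a
   len-bit two's-complement value, e.g. sign_extend(0x7f, 7) == -1 and
   sign_extend(0x3f, 7) == 63; the arithmetic right shift in
   (x << (32 - len)) >> (32 - len) replicates the field's sign bit. */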
#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
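/* Note (added; not in the original): each element of cpu_fpr[] packs
   two architectural single-precision registers, with %f(2n) in the
   high 32 bits and %f(2n+1) in the low 32 bits of cpu_fpr[n].  That
   is why an odd src above can reuse the i64 handle directly while an
   even src must shift right by 32 before truncating. */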
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
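/* Example (added; not in the original): when the V9 PSTATE.AM address
   mask is in effect, a 64-bit address such as 0xfffff80012345678 is
   truncated to its low 32 bits, 0x12345678, before being used. */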
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}
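/* Note (added; not in the original): %g0 reads as the constant zero,
   %g1..%g7 live in the cpu_gregs[] TCG globals, and the windowed
   registers %o0..%i7 (regs 8..31) are loaded indirectly through
   cpu_regwptr, which tracks the current register window. */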
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
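/* Note (added; not in the original): tcg_gen_exit_tb((uintptr_t)tb +
   tb_num) returns a pointer tagged with the jump-slot index so the
   execution loop can patch this block's goto_tb branch and chain it
   directly to its successor; exiting with 0 in the cross-page case
   forces an ordinary TB lookup instead, since direct chaining is only
   valid within a single guest page. */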
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
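/* Worked example (added; not in the original): after a 32-bit add,
   carry-out is exactly (uint32_t)dst < (uint32_t)src, which is what
   the TCG_COND_LTU setcond above computes:
     0xffffffff + 1 wraps to 0, and 0 < 1      -> carry = 1
     0x7fffffff + 1 gives 0x80000000, no wrap  -> carry = 0  */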
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
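/* Note (added; not in the original): MULScc implements one step of an
   iterative 32-bit multiply.  If the LSB of %y is clear the addend is
   replaced by zero; the running sum in rs1 is shifted right one bit
   with N ^ V (the sign of the previous partial result) entering at
   bit 31, while %y shifts right with the low bit of the partial sum
   entering at its top.  32 such steps plus a final fixup produce the
   full 64-bit product. */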
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
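/* Example (added; not in the original): both UMUL and SMUL deposit
   the high 32 bits of the 64-bit product in %y.  For umul with
   operands 0x80000000 and 2 the product is 0x100000000, so %y
   becomes 1 while a 32-bit destination sees 0. */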
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
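/* Added note (assumed layout, for illustration): fcc_offset selects
   which condition-code set is read.  %fcc0 sits at FSR bits 11:10;
   the V9 sets %fcc1..%fcc3 sit at bits 33:32, 35:34 and 37:36, so a
   compare on %fcc1 passes fcc_offset = 22 and gen_mov_reg_FCC0()
   then extracts FSR bit 10 + 22 = 32. */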
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// 1 or 2 or 3: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}
static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}
static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
#else
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}
static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
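/* Worked example (added; not in the original): edge8 with left = 1
   and s1 = 3 computes index = (3 & 7) << 3 = 24, and
   (TABL >> 24) & 0xff = 0xf8, i.e. the mask of the five bytes at
   offsets 3..7 -- exactly the bytes a store starting at address s1
   would touch within its 8-byte block. */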
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
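/* Note (added; not in the original): the right-hand shift count here
   is 64 - 8 * GSR.align, which would be 64 when GSR.align is 0, and a
   variable shift of 64 is undefined for the TCG shift ops.  Since the
   count is a multiple of 8, shift ^ 63 equals 63 - shift, so
   (s2 >> (shift ^ 63)) >> 1 safely computes s2 >> (64 - shift). */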
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
2503 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2505 unsigned int opc
, rs1
, rs2
, rd
;
2506 TCGv cpu_src1
, cpu_src2
;
2507 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2508 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2511 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2512 tcg_gen_debug_insn_start(dc
->pc
);
2515 opc
= GET_FIELD(insn
, 0, 1);
2516 rd
= GET_FIELD(insn
, 2, 6);
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
    case 1:                     /* CALL */
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);

            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;

            if (xop == 0x3a) {  /* generate trap */
                int cond = GET_FIELD(insn, 3, 6);
                TCGv_i32 trap;
                int l1 = -1, mask;

                if (cond == 0) {
                    /* Trap never.  */
                    break;
                }

                save_state(dc);
2640 /* Conditional trap. */
2642 #ifdef TARGET_SPARC64
2644 int cc
= GET_FIELD_SP(insn
, 11, 12);
2646 gen_compare(&cmp
, 0, cond
, dc
);
2647 } else if (cc
== 2) {
2648 gen_compare(&cmp
, 1, cond
, dc
);
2653 gen_compare(&cmp
, 0, cond
, dc
);
2655 l1
= gen_new_label();
2656 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2657 cmp
.c1
, cmp
.c2
, l1
);
2661 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2662 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2664 /* Don't use the normal temporaries, as they may well have
2665 gone out of scope with the branch above. While we're
2666 doing that we might as well pre-truncate to 32-bit. */
2667 trap
= tcg_temp_new_i32();
2669 rs1
= GET_FIELD_SP(insn
, 14, 18);
2671 rs2
= GET_FIELD_SP(insn
, 0, 6);
2673 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2674 /* Signal that the trap value is fully constant. */
2677 TCGv t1
= gen_load_gpr(dc
, rs1
);
2678 tcg_gen_trunc_tl_i32(trap
, t1
);
2679 tcg_gen_addi_i32(trap
, trap
, rs2
);
2683 rs2
= GET_FIELD_SP(insn
, 0, 4);
2684 t1
= gen_load_gpr(dc
, rs1
);
2685 t2
= gen_load_gpr(dc
, rs2
);
2686 tcg_gen_add_tl(t1
, t1
, t2
);
2687 tcg_gen_trunc_tl_i32(trap
, t1
);
2690 tcg_gen_andi_i32(trap
, trap
, mask
);
2691 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
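                    /* Software trap numbers sit at TT_TRAP (0x80) and up:
                       "ta 0" raises trap type 0x80.  Linux guests, for
                       example, enter the kernel with "ta 0x10" (sparc32)
                       or "ta 0x6d" (sparc64). */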
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);

                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->is_br = 1;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    /* Read Asr17 */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;

                        r_tickptr = tcg_temp_new_ptr();
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
                        tcg_temp_free_ptr(r_tickptr);
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                save_state(dc);
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
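                /* xop now holds the 9-bit opf field (insn<13:5>), which
                   names the concrete FPop1 operation in the switch below. */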
                switch (xop) {
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    break;
                case 0x42: /* faddd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    break;
                case 0x43: /* faddq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    break;
                case 0x45: /* fsubs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    break;
                case 0x46: /* fsubd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    break;
                case 0x47: /* fsubq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    break;
                case 0x49: /* fmuls */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    break;
                case 0x4a: /* fmuld */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    break;
                case 0x4b: /* fmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    break;
                case 0x4d: /* fdivs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    break;
                case 0x4e: /* fdivd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    break;
                case 0x4f: /* fdivq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    break;
                case 0x69: /* fsmuld */
                    CHECK_FPU_FEATURE(dc, FSMULD);
                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    break;
                case 0x6e: /* fdmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    break;
                case 0xc4: /* fitos */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                    break;
                case 0xc6: /* fdtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    break;
                case 0xc7: /* fqtos */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    break;
                case 0xc8: /* fitod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    break;
                case 0xc9: /* fstod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    break;
                case 0xcb: /* fqtod */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    break;
                case 0xcc: /* fitoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    break;
                case 0xcd: /* fstoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    break;
                case 0xce: /* fdtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    break;
                case 0xd1: /* fstoi */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                    break;
                case 0xd2: /* fdtoi */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    break;
                case 0xd3: /* fqtoi */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 fmovd */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x3: /* V9 fmovq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_move_Q(rd, rs2);
                    break;
                case 0x6: /* V9 fnegd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                    break;
                case 0x7: /* V9 fnegq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    break;
                case 0xa: /* V9 fabsd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                    break;
                case 0xb: /* V9 fabsq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    break;
                case 0x81: /* V9 fstox */
                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    break;
                case 0x82: /* V9 fdtox */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    break;
                case 0x83: /* V9 fqtox */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    break;
                case 0x84: /* V9 fxtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    break;
                case 0x88: /* V9 fxtod */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    break;
                case 0x8c: /* V9 fxtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                                  \
                do {                                               \
                    DisasCompare cmp;                              \
                    cond = GET_FIELD_SP(insn, 10, 12);             \
                    cpu_src1 = get_src1(dc, insn);                 \
                    gen_compare_reg(&cmp, cond, cpu_src1);         \
                    gen_fmov##sz(dc, &cmp, rd, rs2);               \
                    free_compare(&cmp);                            \
                } while (0)
                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                    FMOVR(s);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    FMOVR(d);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVR(q);
                    break;
                }
#undef FMOVR
#endif
                switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                                 \
                    do {                                                \
                        DisasCompare cmp;                               \
                        cond = GET_FIELD_SP(insn, 14, 17);              \
                        gen_fcompare(&cmp, fcc, cond);                  \
                        gen_fmov##sz(dc, &cmp, rd, rs2);                \
                        free_compare(&cmp);                             \
                    } while (0)

                    case 0x001: /* V9 fmovscc %fcc0 */
                        FMOVCC(0, s);
                        break;
                    case 0x002: /* V9 fmovdcc %fcc0 */
                        FMOVCC(0, d);
                        break;
                    case 0x003: /* V9 fmovqcc %fcc0 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(0, q);
                        break;
                    case 0x041: /* V9 fmovscc %fcc1 */
                        FMOVCC(1, s);
                        break;
                    case 0x042: /* V9 fmovdcc %fcc1 */
                        FMOVCC(1, d);
                        break;
                    case 0x043: /* V9 fmovqcc %fcc1 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(1, q);
                        break;
                    case 0x081: /* V9 fmovscc %fcc2 */
                        FMOVCC(2, s);
                        break;
                    case 0x082: /* V9 fmovdcc %fcc2 */
                        FMOVCC(2, d);
                        break;
                    case 0x083: /* V9 fmovqcc %fcc2 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(2, q);
                        break;
                    case 0x0c1: /* V9 fmovscc %fcc3 */
                        FMOVCC(3, s);
                        break;
                    case 0x0c2: /* V9 fmovdcc %fcc3 */
                        FMOVCC(3, d);
                        break;
                    case 0x0c3: /* V9 fmovqcc %fcc3 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(3, q);
                        break;
#undef FMOVCC
#define FMOVCC(xcc, sz)                                                 \
                    do {                                                \
                        DisasCompare cmp;                               \
                        cond = GET_FIELD_SP(insn, 14, 17);              \
                        gen_compare(&cmp, xcc, cond, dc);               \
                        gen_fmov##sz(dc, &cmp, rd, rs2);                \
                        free_compare(&cmp);                             \
                    } while (0)

                    case 0x101: /* V9 fmovscc %icc */
                        FMOVCC(0, s);
                        break;
                    case 0x102: /* V9 fmovdcc %icc */
                        FMOVCC(0, d);
                        break;
                    case 0x103: /* V9 fmovqcc %icc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(0, q);
                        break;
                    case 0x181: /* V9 fmovscc %xcc */
                        FMOVCC(1, s);
                        break;
                    case 0x182: /* V9 fmovdcc %xcc */
                        FMOVCC(1, d);
                        break;
                    case 0x183: /* V9 fmovqcc %xcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(1, q);
                        break;
#undef FMOVCC
#endif
                case 0x51: /* fcmps, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x52: /* fcmpd, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x53: /* fcmpq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpq(rd & 3);
                    break;
                case 0x55: /* fcmpes, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x56: /* fcmped, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x57: /* fcmpeq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpeq(rd & 3);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x2) {
                TCGv dst = gen_dest_gpr(dc, rd);
                rs1 = GET_FIELD(insn, 13, 17);
                if (rs1 == 0) {
                    /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_movi_tl(dst, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            tcg_gen_movi_tl(dst, 0);
                            gen_store_gpr(dc, rd, dst);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            gen_store_gpr(dc, rd, cpu_src2);
                        }
                    }
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_ori_tl(dst, cpu_src1, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            /* mov shortcut:  or x, %g0, y -> mov x, y */
                            gen_store_gpr(dc, rd, cpu_src1);
                        } else {
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                            gen_store_gpr(dc, rd, dst);
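                            /* SPARC has no dedicated mov instruction: the
                               assembler's "mov" and "clr" are really "or"
                               with %g0, which always reads as zero, so the
                               rs1 == 0 / rs2 == 0 shortcuts above avoid a
                               redundant TCG or. */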
                        }
                    }
                }
#ifdef TARGET_SPARC64
            } else if (xop == 0x25) { /* sll, V9 sllx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                    }
                    tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
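                /* Bit 12 of the instruction ("X") picks the 64-bit form:
                   sllx masks the shift count to 6 bits, plain sll to 5,
                   so "sllx %o0, 63, %o1" shifts by 63 while the same
                   count in "sll" would shift by 63 & 0x1f = 31. */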
            } else if (xop == 0x26) { /* srl, V9 srlx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x27) { /* sra, V9 srax */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
                        tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
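                /* In the 32-bit forms the source is first narrowed (srl
                   zero-extends via the andi, sra sign-extends via ext32s)
                   so that stale upper-half bits of the 64-bit register
                   cannot leak into the result. */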
            } else if (xop < 0x36) {
                if (xop < 0x20) {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop & ~0x10) {
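                    /* Bit 4 of xop selects the flag-setting variant
                       (addcc vs. add, etc.), hence the ~0x10 mask here
                       and the "xop & 0x10" tests in the cases below. */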
                    case 0x0: /* add */
                        if (xop & 0x10) {
                            gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                            dc->cc_op = CC_OP_ADD;
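                            /* Condition codes are tracked lazily: only
                               the operands and the CC_OP kind are recorded
                               here; the actual PSR/CCR flag bits are
                               materialised when a later instruction reads
                               them. */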
                        } else {
                            tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x1: /* and */
                        tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x2: /* or */
                        tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x3: /* xor */
                        tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x4: /* sub */
                        if (xop & 0x10) {
                            gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                            dc->cc_op = CC_OP_SUB;
                        } else {
                            tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                        }
                        break;
                    case 0x5: /* andn */
                        tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x6: /* orn */
                        tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x7: /* xorn */
                        tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0x8: /* addx, V9 addc */
                        gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0x9: /* V9 mulx */
                        tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xa: /* umul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xb: /* smul */
                        CHECK_IU_FEATURE(dc, MUL);
                        gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
                        if (xop & 0x10) {
                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                            dc->cc_op = CC_OP_LOGIC;
                        }
                        break;
                    case 0xc: /* subx, V9 subc */
                        gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                        (xop & 0x10));
                        break;
#ifdef TARGET_SPARC64
                    case 0xd: /* V9 udivx */
                        gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        break;
#endif
                    case 0xe: /* udiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    case 0xf: /* sdiv */
                        CHECK_IU_FEATURE(dc, DIV);
                        if (xop & 0x10) {
                            gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
                                               cpu_src2);
                            dc->cc_op = CC_OP_DIV;
                        } else {
                            gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
                                            cpu_src2);
                        }
                        break;
                    default:
                        goto illegal_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_dst);
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    cpu_src2 = get_src2(dc, insn);
                    switch (xop) {
                    case 0x20: /* taddcc */
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
                        dc->cc_op = CC_OP_TADD;
                        break;
                    case 0x21: /* tsubcc */
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
                        dc->cc_op = CC_OP_TSUB;
                        break;
                    case 0x22: /* taddcctv */
                        gen_helper_taddcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TADDTV;
                        break;
                    case 0x23: /* tsubcctv */
                        gen_helper_tsubcctv(cpu_dst, cpu_env,
                                            cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        dc->cc_op = CC_OP_TSUBTV;
                        break;
                    case 0x24: /* mulscc */
                        update_psr(dc);
                        gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
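                        /* mulscc is one step of the pre-V8 iterative
                           multiply: a full 32x32 multiply is coded as 32
                           mulscc steps plus fixups, with %y holding the
                           multiplier. */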
                        break;
#ifndef TARGET_SPARC64
                    case 0x25:  /* sll */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x26:  /* srl */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x27:  /* sra */
                        if (IS_IMM) { /* immediate */
                            simm = GET_FIELDs(insn, 20, 31);
                            tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
                        } else { /* register */
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
                            tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
                        }
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
#endif
                    case 0x30:
                        {
                            cpu_tmp0 = get_temp_tl(dc);
                            switch (rd) { /* wr %asr */
                            case 0: /* wry */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
                                break;
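                            /* Architecturally, wr writes "rs1 XOR
                               reg_or_imm" to the state register, not rs1
                               itself -- hence the xor above and in every
                               wr* case below. */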
#ifndef TARGET_SPARC64
                            case 0x01 ... 0x0f: /* undefined in the
                                                   SPARCv8 manual, nop
                                                   on the microSPARC II */
                            case 0x10 ... 0x1f: /* implementation-dependent
                                                   in the SPARCv8 manual,
                                                   nop on the microSPARC
                                                   II */
                                if ((rd == 0x13) && (dc->def->features &
                                                     CPU_FEATURE_POWERDOWN)) {
                                    /* LEON3 power-down */
                                    save_state(dc);
                                    gen_helper_power_down(cpu_env);
                                }
                                break;
#else
                            case 0x2: /* V9 wrccr */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_wrccr(cpu_env, cpu_tmp0);
                                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                                dc->cc_op = CC_OP_FLAGS;
                                break;
                            case 0x3: /* V9 wrasi */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
                                tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
                                break;
                            case 0x6: /* V9 wrfprs */
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(0);
                                dc->is_br = 1;
                                break;
                            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                                if (supervisor(dc)) {
                                    ; // XXX
                                }
#endif
                                break;
                            case 0x13: /* Graphics Status */
                                if (gen_trap_ifnofpu(dc)) {
                                    goto jmp_insn;
                                }
                                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
                                break;
                            case 0x14: /* Softint set */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_set_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x15: /* Softint clear */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_clear_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x16: /* Softint write */
                                if (!supervisor(dc))
                                    goto illegal_insn;
                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                                gen_helper_write_softint(cpu_env, cpu_tmp0);
                                break;
                            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_tick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_stick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 0x10: /* Performance Control */
                            case 0x11: /* Performance Instrumentation
                                          Counter */
                            case 0x12: /* Dispatch Control */
#endif
                            default:
                                goto illegal_insn;
                            }
                        }
                        break;
#if !defined(CONFIG_USER_ONLY)
                    case 0x31: /* wrpsr, V9 saved, restored */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0:
                                gen_helper_saved(cpu_env);
                                break;
                            case 1:
                                gen_helper_restored(cpu_env);
                                break;
                            case 2: /* UA2005 allclean */
                            case 3: /* UA2005 otherw */
                            case 4: /* UA2005 normalw */
                            case 5: /* UA2005 invalw */
                                // XXX
                            default:
                                goto illegal_insn;
                            }
#else
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            gen_helper_wrpsr(cpu_env, cpu_tmp0);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                            dc->cc_op = CC_OP_FLAGS;
                            save_state(dc);
                            gen_op_next_insn();
                            tcg_gen_exit_tb(0);
                            dc->is_br = 1;
#endif
                        }
                        break;
                    case 0x32: /* wrwim, V9 wrpr */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0: // tpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 1: // tnpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tnpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 2: // tstate
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state,
                                                           tstate));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 3: // tt
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                                    offsetof(trap_state, tt));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 4: // tick
                                {
                                    TCGv_ptr r_tickptr;

                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 5: // tba
                                tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                                break;
                            case 6: // pstate
                                save_state(dc);
                                gen_helper_wrpstate(cpu_env, cpu_tmp0);
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 7: // tl
                                save_state(dc);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, tl));
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 8: // pil
                                gen_helper_wrpil(cpu_env, cpu_tmp0);
                                break;
                            case 9: // cwp
                                gen_helper_wrcwp(cpu_env, cpu_tmp0);
                                break;
                            case 10: // cansave
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cansave));
                                break;
                            case 11: // canrestore
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         canrestore));
                                break;
                            case 12: // cleanwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cleanwin));
                                break;
                            case 13: // otherwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         otherwin));
                                break;
                            case 14: // wstate
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         wstate));
                                break;
                            case 16: // UA2005 gl
                                CHECK_IU_FEATURE(dc, GL);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, gl));
                                break;
                            case 26: // UA2005 strand status
                                CHECK_IU_FEATURE(dc, HYPV);
                                if (!hypervisor(dc))
                                    goto priv_insn;
                                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                                break;
                            default:
                                goto illegal_insn;
                            }
#else
                            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                            if (dc->def->nwindows != 32) {
                                tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                                (1 << dc->def->nwindows) - 1);
                            }
#endif
                        }
                        break;
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                            CHECK_IU_FEATURE(dc, HYPV);
                            if (!hypervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            switch (rd) {
                            case 0: // hpstate
                                // XXX gen_op_wrhpstate();
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(0);
                                dc->is_br = 1;
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                                break;
                            case 5: // htba
                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                                break;
                            case 31: // hstick_cmpr
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, hstick));
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_hstick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                }
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#ifdef TARGET_SPARC64
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            DisasCompare cmp;
                            TCGv dst;

                            if (insn & (1 << 18)) {
                                if (cc == 0) {
                                    gen_compare(&cmp, 0, cond, dc);
                                } else if (cc == 2) {
                                    gen_compare(&cmp, 1, cond, dc);
                                } else {
                                    goto illegal_insn;
                                }
                            } else {
                                gen_fcompare(&cmp, cc, cond);
                            }

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 11-bit field we have
                               in movcc.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 10);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
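                            /* movcond above writes cpu_src2 when the
                               comparison holds and re-writes the old value
                               otherwise, so a conditional move never needs
                               a branch in the generated code. */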
                            break;
                        }
                    case 0x2d: /* V9 sdivx */
                        gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2e: /* V9 popc */
                        gen_helper_popc(cpu_dst, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            DisasCompare cmp;
                            TCGv dst;

                            gen_compare_reg(&cmp, cond, cpu_src1);

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 10-bit field we have
                               in movr.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 9);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
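                /* VIS instructions sit in the FPop opcode space, so they
                   trap when the FPU is disabled even though several of
                   them (edge, array, alignaddr, bmask) operate purely on
                   integer registers. */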
                switch (opf) {
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
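                    /* alignaddr also latches addr & 7 into GSR.align; the
                       companion faligndata (below) uses that to pick an
                       unaligned 8-byte window out of two adjacent
                       doublewords. */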
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
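                    /* The low 32 bits of the sum are parked in GSR<63:32>
                       by the deposit above, where a later bshuffle reads
                       them as its byte-selection mask. */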
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                TCGv_i32 r_const;

                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_helper_restore(cpu_env);
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
                case 0x38:      /* jmpl */
                    {
                        TCGv t;
                        TCGv_i32 r_const;

                        t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39:      /* rett, V9 return */
                    {
                        TCGv_i32 r_const;

                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                        goto unimp_flush;
                    /* nop */
                    break;
                case 0x3c:      /* save */
                    save_state(dc);
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d:      /* restore */
                    save_state(dc);
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
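                /* done and retry below are the V9 trap-return pair: done
                   resumes at the trapped instruction's npc, retry
                   re-executes the trapped instruction at its pc. */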
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e:      /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
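            /* Every SPARC load/store uses one of exactly two addressing
               modes, [rs1 + rs2] or [rs1 + simm13]; cpu_addr now holds the
               effective address for all the cases below. */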
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d ) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);
                switch (xop) {
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
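                        /* The doubleword was fetched with one 64-bit
                           access and is now split big-endian style: the
                           high word stays in cpu_val for the even register
                           rd, the low word was stored to rd + 1. */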
                    }
                    break;
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:       /* ldstub -- XXX: should be atomically */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
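                        /* Note that, as the XXX above flags, ldstub and
                           this swap are emulated as a separate load plus
                           store; that is tolerable only because a TCG
                           translation block runs without preemption,
                           whereas real hardware performs a locked
                           read-modify-write. */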
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
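                    /* The "alternate" forms carry an 8-bit ASI (address
                       space identifier); gen_ld_asi fetches it either from
                       the instruction or, for the immediate form on V9,
                       from the %asi register, and dispatches on it. */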
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:      /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory. Also
                                   atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif /* !CONFIG_USER_ONLY || TARGET_SPARC64 */
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;
                        TCGv lo;

                        save_state(dc);
                        gen_address_mask(dc, cpu_addr);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        lo = gen_load_gpr(dc, rd + 1);

                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
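                        /* tcg_gen_concat_tl_i64 places the odd register in
                           the low half and rd in the high half, mirroring
                           the big-endian split that ldd performs on the
                           way in. */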
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                gen_st_asi(cpu_val, cpu_addr, insn, 4);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                gen_st_asi(cpu_val, cpu_addr, insn, 1);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                gen_st_asi(cpu_val, cpu_addr, insn, 2);
                dc->npc = DYNAMIC_PC;
                break;
            case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                if (rd & 1)
                    goto illegal_insn;
                gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                break;
#endif
#ifdef TARGET_SPARC64
            case 0x0e: /* V9 stx */
                gen_address_mask(dc, cpu_addr);
                tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                break;
            case 0x1e: /* V9 stxa */
                gen_st_asi(cpu_val, cpu_addr, insn, 8);
                dc->npc = DYNAMIC_PC;
                break;
#endif
            default:
                goto illegal_insn;
            }
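
            /* The alternate-space stores above go through the gen_st_asi()
               helper, which may trap or touch translation-relevant state
               (MMU controls, for instance), so dc->npc is forced to
               DYNAMIC_PC rather than left as a static next PC. */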
        } else if (xop > 0x23 && xop < 0x28) {
            if (gen_trap_ifnofpu(dc)) {
                goto jmp_insn;
            }
            switch (xop) {
            case 0x24: /* stf, store fpreg */
                {
                    TCGv t = get_temp_tl(dc);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_ext_i32_tl(t, cpu_src1_32);
                    tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                }
                break;
            case 0x25: /* stfsr, V9 stxfsr */
                {
                    TCGv t = get_temp_tl(dc);

                    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                        break;
                    }
#endif
                    tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                }
                break;
            case 0x26:
#ifdef TARGET_SPARC64
                /* V9 stqf, store quad fpreg */
                {
                    TCGv_i32 r_const;

                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rd));
                    r_const = tcg_const_i32(dc->mem_idx);
                    gen_address_mask(dc, cpu_addr);
                    gen_helper_stqf(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                }
                break;
#else /* !TARGET_SPARC64 */
                /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                goto illegal_insn;
#else
                if (!supervisor(dc))
                    goto priv_insn;
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                goto nfq_insn;
#endif
#endif
            case 0x27: /* stdf, store double fpreg */
                gen_address_mask(dc, cpu_addr);
                cpu_src1_64 = gen_load_fpr_D(dc, rd);
                tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                break;
            default:
                goto illegal_insn;
            }
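
            /* For stfsr the rd field picks the width: rd == 0 is the 32-bit
               %fsr store, while rd == 1 encodes the V9 stxfsr form that
               stores the full 64-bit register. */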
        } else if (xop > 0x33 && xop < 0x3f) {
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x34: /* V9 stfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_stf_asi(cpu_addr, insn, 4, rd);
                break;
            case 0x36: /* V9 stqfa */
                {
                    TCGv_i32 r_const;

                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    r_const = tcg_const_i32(7);
                    gen_helper_check_align(cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                }
                break;
            case 0x37: /* V9 stdfa */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                break;
            case 0x3e: /* V9 casxa */
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                break;
#else
            case 0x34: /* stc */
            case 0x35: /* stcsr */
            case 0x36: /* stdcq */
            case 0x37: /* stdc */
                goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                CHECK_IU_FEATURE(dc, CASA);
                if (IS_IMM)
                    goto illegal_insn;
                if (!supervisor(dc))
                    goto priv_insn;
#endif
                rs2 = GET_FIELD(insn, 27, 31);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                break;
#endif
            default:
                goto illegal_insn;
            }
        } else {
            goto illegal_insn;
        }
        break;
    }
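
    /* pc/npc model the SPARC delay slot.  Besides a real address, npc can
       hold DYNAMIC_PC (value only known at run time) or JUMP_PC (one of the
       two recorded jump_pc[] targets, selected by the condition). */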
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
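
/* Temporaries handed out by get_temp_i32()/get_temp_tl() are recorded in
   dc->t32[]/dc->ttl[] and released in one sweep at egress, so the decode
   paths themselves never free them individually. */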
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(cpu_env);
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
5289 qemu_log("Search PC...\n");
5290 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
5294 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5295 tcg_ctx
.gen_opc_pc
[lj
] = dc
->pc
;
5296 gen_opc_npc
[lj
] = dc
->npc
;
5297 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
5298 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);
        num_insns++;

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((tcg_ctx.gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
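
    /* The loop above bounds a translation block four ways: an instruction
       that branches (is_br), a non-sequential or page-crossing PC, a nearly
       full opcode buffer, and the icount/single-step limits. */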
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (spc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        gen_opc_jump_pc[0] = dc->jump_pc[0];
        gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}
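
/* The two entry points differ only in the search-pc flag passed to
   gen_intermediate_code_internal(): the _pc variant records per-opcode
   pc/npc values so that restore_state_to_opc() can map a faulting host PC
   back to guest pc/npc. */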
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1", "g2", "g3", "g4", "g5", "g6", "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, xcc), "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, asi), "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState,
                                                      hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                           offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, psr), "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState,
                                                       gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState,
                                                         fpr[i]),
                                                fregnames[i]);
        }
    }
}
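
/* restore_state_to_opc() undoes the npc encoding used during translation:
   1 is DYNAMIC_PC (the real npc was already stored), 2 is JUMP_PC (pick one
   of the recorded jump targets based on the condition), and anything else
   is the next PC itself. */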
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong npc;

    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    npc = gen_opc_npc[pc_pos];
    if (npc == 1) {
        /* dynamic NPC: already stored */
    } else if (npc == 2) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = gen_opc_jump_pc[0];
        } else {
            env->npc = gen_opc_jump_pc[1];
        }
    } else {
        env->npc = npc;
    }
}