4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S) qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
51 # define gen_helper_restored(E) qemu_build_not_reached()
52 # define gen_helper_retry(E) qemu_build_not_reached()
53 # define gen_helper_saved(E) qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
64 # define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
90 # define FSR_LDXFSR_MASK 0
91 # define FSR_LDXFSR_OLDMASK 0
95 /* Dynamic PC, must exit to main loop. */
97 /* Dynamic PC, one of two values according to jump_pc[T2]. */
99 /* Dynamic PC, may lookup next TB. */
100 #define DYNAMIC_PC_LOOKUP 3
102 #define DISAS_EXIT DISAS_TARGET_0
104 /* global register indexes */
105 static TCGv_ptr cpu_regwptr
;
106 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
107 static TCGv cpu_regs
[32];
110 static TCGv cpu_cond
;
111 static TCGv cpu_cc_N
;
112 static TCGv cpu_cc_V
;
113 static TCGv cpu_icc_Z
;
114 static TCGv cpu_icc_C
;
115 #ifdef TARGET_SPARC64
116 static TCGv cpu_xcc_Z
;
117 static TCGv cpu_xcc_C
;
118 static TCGv_i32 cpu_fprs
;
121 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
122 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
125 #ifdef TARGET_SPARC64
126 #define cpu_cc_Z cpu_xcc_Z
127 #define cpu_cc_C cpu_xcc_C
129 #define cpu_cc_Z cpu_icc_Z
130 #define cpu_cc_C cpu_icc_C
131 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
132 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
135 /* Floating point registers */
136 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
138 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
139 #ifdef TARGET_SPARC64
140 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
141 # define env64_field_offsetof(X) env_field_offsetof(X)
143 # define env32_field_offsetof(X) env_field_offsetof(X)
144 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
147 typedef struct DisasCompare
{
153 typedef struct DisasDelayException
{
154 struct DisasDelayException
*next
;
157 /* Saved state at parent insn. */
160 } DisasDelayException
;
162 typedef struct DisasContext
{
163 DisasContextBase base
;
164 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
165 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
167 /* Used when JUMP_PC value is used. */
169 target_ulong jump_pc
[2];
174 bool address_mask_32bit
;
175 #ifndef CONFIG_USER_ONLY
177 #ifdef TARGET_SPARC64
183 #ifdef TARGET_SPARC64
187 DisasDelayException
*delay_excp_list
;
190 // This function uses non-native bit order
191 #define GET_FIELD(X, FROM, TO) \
192 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
194 // This function uses the order in the manuals, i.e. bit 0 is 2^0
195 #define GET_FIELD_SP(X, FROM, TO) \
196 GET_FIELD(X, 31 - (TO), 31 - (FROM))
198 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
199 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
201 #ifdef TARGET_SPARC64
202 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
203 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
205 #define DFPREG(r) (r & 0x1e)
206 #define QFPREG(r) (r & 0x1c)
209 #define UA2005_HTRAP_MASK 0xff
210 #define V8_TRAP_MASK 0x7f
212 #define IS_IMM (insn & (1<<13))
214 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
216 #if defined(TARGET_SPARC64)
217 int bit
= (rd
< 32) ? 1 : 2;
218 /* If we know we've already set this bit within the TB,
219 we can avoid setting it again. */
220 if (!(dc
->fprs_dirty
& bit
)) {
221 dc
->fprs_dirty
|= bit
;
222 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
227 /* floating point registers moves */
228 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
230 TCGv_i32 ret
= tcg_temp_new_i32();
232 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
234 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
239 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
241 TCGv_i64 t
= tcg_temp_new_i64();
243 tcg_gen_extu_i32_i64(t
, v
);
244 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
245 (dst
& 1 ? 0 : 32), 32);
246 gen_update_fprs_dirty(dc
, dst
);
249 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
251 return tcg_temp_new_i32();
254 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
257 return cpu_fpr
[src
/ 2];
260 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
263 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
264 gen_update_fprs_dirty(dc
, dst
);
267 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
269 return cpu_fpr
[DFPREG(dst
) / 2];
272 static void gen_op_load_fpr_QT0(unsigned int src
)
274 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
275 offsetof(CPU_QuadU
, ll
.upper
));
276 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
277 offsetof(CPU_QuadU
, ll
.lower
));
280 static void gen_op_load_fpr_QT1(unsigned int src
)
282 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
283 offsetof(CPU_QuadU
, ll
.upper
));
284 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
285 offsetof(CPU_QuadU
, ll
.lower
));
288 static void gen_op_store_QT0_fpr(unsigned int dst
)
290 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
291 offsetof(CPU_QuadU
, ll
.upper
));
292 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
293 offsetof(CPU_QuadU
, ll
.lower
));
297 #ifdef CONFIG_USER_ONLY
298 #define supervisor(dc) 0
299 #define hypervisor(dc) 0
301 #ifdef TARGET_SPARC64
302 #define hypervisor(dc) (dc->hypervisor)
303 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
305 #define supervisor(dc) (dc->supervisor)
306 #define hypervisor(dc) 0
310 #if !defined(TARGET_SPARC64)
311 # define AM_CHECK(dc) false
312 #elif defined(TARGET_ABI32)
313 # define AM_CHECK(dc) true
314 #elif defined(CONFIG_USER_ONLY)
315 # define AM_CHECK(dc) false
317 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
320 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
323 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
327 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
329 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
332 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
336 return cpu_regs
[reg
];
338 TCGv t
= tcg_temp_new();
339 tcg_gen_movi_tl(t
, 0);
344 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
348 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
352 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
356 return cpu_regs
[reg
];
358 return tcg_temp_new();
362 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
364 return translator_use_goto_tb(&s
->base
, pc
) &&
365 translator_use_goto_tb(&s
->base
, npc
);
368 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
369 target_ulong pc
, target_ulong npc
)
371 if (use_goto_tb(s
, pc
, npc
)) {
372 /* jump to same page: we can use a direct jump */
373 tcg_gen_goto_tb(tb_num
);
374 tcg_gen_movi_tl(cpu_pc
, pc
);
375 tcg_gen_movi_tl(cpu_npc
, npc
);
376 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
378 /* jump to another page: we can use an indirect jump */
379 tcg_gen_movi_tl(cpu_pc
, pc
);
380 tcg_gen_movi_tl(cpu_npc
, npc
);
381 tcg_gen_lookup_and_goto_ptr();
385 static TCGv
gen_carry32(void)
387 if (TARGET_LONG_BITS
== 64) {
388 TCGv t
= tcg_temp_new();
389 tcg_gen_extract_tl(t
, cpu_icc_C
, 32, 1);
395 static void gen_op_addcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
397 TCGv z
= tcg_constant_tl(0);
400 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
401 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
403 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
405 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
406 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src2
);
407 tcg_gen_andc_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
408 if (TARGET_LONG_BITS
== 64) {
410 * Carry-in to bit 32 is result ^ src1 ^ src2.
411 * We already have the src xor term in Z, from computation of V.
413 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
414 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
416 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
417 tcg_gen_mov_tl(dst
, cpu_cc_N
);
420 static void gen_op_addcc(TCGv dst
, TCGv src1
, TCGv src2
)
422 gen_op_addcc_int(dst
, src1
, src2
, NULL
);
425 static void gen_op_taddcc(TCGv dst
, TCGv src1
, TCGv src2
)
427 TCGv t
= tcg_temp_new();
429 /* Save the tag bits around modification of dst. */
430 tcg_gen_or_tl(t
, src1
, src2
);
432 gen_op_addcc(dst
, src1
, src2
);
434 /* Incorprate tag bits into icc.V */
435 tcg_gen_andi_tl(t
, t
, 3);
436 tcg_gen_neg_tl(t
, t
);
437 tcg_gen_ext32u_tl(t
, t
);
438 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
441 static void gen_op_addc(TCGv dst
, TCGv src1
, TCGv src2
)
443 tcg_gen_add_tl(dst
, src1
, src2
);
444 tcg_gen_add_tl(dst
, dst
, gen_carry32());
447 static void gen_op_addccc(TCGv dst
, TCGv src1
, TCGv src2
)
449 gen_op_addcc_int(dst
, src1
, src2
, gen_carry32());
452 static void gen_op_subcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
454 TCGv z
= tcg_constant_tl(0);
457 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
458 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
460 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
462 tcg_gen_neg_tl(cpu_cc_C
, cpu_cc_C
);
463 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
464 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src1
);
465 tcg_gen_and_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
466 #ifdef TARGET_SPARC64
467 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
468 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
470 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
471 tcg_gen_mov_tl(dst
, cpu_cc_N
);
474 static void gen_op_subcc(TCGv dst
, TCGv src1
, TCGv src2
)
476 gen_op_subcc_int(dst
, src1
, src2
, NULL
);
479 static void gen_op_tsubcc(TCGv dst
, TCGv src1
, TCGv src2
)
481 TCGv t
= tcg_temp_new();
483 /* Save the tag bits around modification of dst. */
484 tcg_gen_or_tl(t
, src1
, src2
);
486 gen_op_subcc(dst
, src1
, src2
);
488 /* Incorprate tag bits into icc.V */
489 tcg_gen_andi_tl(t
, t
, 3);
490 tcg_gen_neg_tl(t
, t
);
491 tcg_gen_ext32u_tl(t
, t
);
492 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
495 static void gen_op_subc(TCGv dst
, TCGv src1
, TCGv src2
)
497 tcg_gen_sub_tl(dst
, src1
, src2
);
498 tcg_gen_sub_tl(dst
, dst
, gen_carry32());
501 static void gen_op_subccc(TCGv dst
, TCGv src1
, TCGv src2
)
503 gen_op_subcc_int(dst
, src1
, src2
, gen_carry32());
506 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
508 TCGv zero
= tcg_constant_tl(0);
509 TCGv t_src1
= tcg_temp_new();
510 TCGv t_src2
= tcg_temp_new();
511 TCGv t0
= tcg_temp_new();
513 tcg_gen_ext32u_tl(t_src1
, src1
);
514 tcg_gen_ext32u_tl(t_src2
, src2
);
520 tcg_gen_andi_tl(t0
, cpu_y
, 0x1);
521 tcg_gen_movcond_tl(TCG_COND_EQ
, t_src2
, t0
, zero
, zero
, t_src2
);
525 * y = (b2 << 31) | (y >> 1);
527 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
528 tcg_gen_deposit_tl(cpu_y
, t0
, src1
, 31, 1);
531 tcg_gen_xor_tl(t0
, cpu_cc_N
, cpu_cc_V
);
534 * src1 = (b1 << 31) | (src1 >> 1)
536 tcg_gen_andi_tl(t0
, t0
, 1u << 31);
537 tcg_gen_shri_tl(t_src1
, t_src1
, 1);
538 tcg_gen_or_tl(t_src1
, t_src1
, t0
);
540 gen_op_addcc(dst
, t_src1
, t_src2
);
543 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
545 #if TARGET_LONG_BITS == 32
547 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
549 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
552 TCGv t0
= tcg_temp_new_i64();
553 TCGv t1
= tcg_temp_new_i64();
556 tcg_gen_ext32s_i64(t0
, src1
);
557 tcg_gen_ext32s_i64(t1
, src2
);
559 tcg_gen_ext32u_i64(t0
, src1
);
560 tcg_gen_ext32u_i64(t1
, src2
);
563 tcg_gen_mul_i64(dst
, t0
, t1
);
564 tcg_gen_shri_i64(cpu_y
, dst
, 32);
568 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
570 /* zero-extend truncated operands before multiplication */
571 gen_op_multiply(dst
, src1
, src2
, 0);
574 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
576 /* sign-extend truncated operands before multiplication */
577 gen_op_multiply(dst
, src1
, src2
, 1);
580 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
582 #ifdef TARGET_SPARC64
583 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
584 tcg_gen_ext32s_tl(dst
, dst
);
586 TCGv_i64 t64
= tcg_temp_new_i64();
587 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
588 tcg_gen_trunc_i64_tl(dst
, t64
);
592 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
596 #ifdef TARGET_SPARC64
599 t64
= tcg_temp_new_i64();
602 gen_helper_udiv(t64
, tcg_env
, src1
, src2
);
604 #ifdef TARGET_SPARC64
605 tcg_gen_ext32u_tl(cpu_cc_N
, t64
);
606 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
607 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
608 tcg_gen_movi_tl(cpu_icc_C
, 0);
610 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
612 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
613 tcg_gen_movi_tl(cpu_cc_C
, 0);
614 tcg_gen_mov_tl(dst
, cpu_cc_N
);
617 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
621 #ifdef TARGET_SPARC64
624 t64
= tcg_temp_new_i64();
627 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
629 #ifdef TARGET_SPARC64
630 tcg_gen_ext32s_tl(cpu_cc_N
, t64
);
631 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
632 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
633 tcg_gen_movi_tl(cpu_icc_C
, 0);
635 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
637 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
638 tcg_gen_movi_tl(cpu_cc_C
, 0);
639 tcg_gen_mov_tl(dst
, cpu_cc_N
);
642 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
644 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
647 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
649 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
652 static void gen_op_popc(TCGv dst
, TCGv src1
, TCGv src2
)
654 tcg_gen_ctpop_tl(dst
, src2
);
657 #ifndef TARGET_SPARC64
658 static void gen_helper_array8(TCGv dst
, TCGv src1
, TCGv src2
)
660 g_assert_not_reached();
664 static void gen_op_array16(TCGv dst
, TCGv src1
, TCGv src2
)
666 gen_helper_array8(dst
, src1
, src2
);
667 tcg_gen_shli_tl(dst
, dst
, 1);
670 static void gen_op_array32(TCGv dst
, TCGv src1
, TCGv src2
)
672 gen_helper_array8(dst
, src1
, src2
);
673 tcg_gen_shli_tl(dst
, dst
, 2);
676 static void gen_op_fpack16(TCGv_i32 dst
, TCGv_i64 src
)
678 #ifdef TARGET_SPARC64
679 gen_helper_fpack16(dst
, cpu_gsr
, src
);
681 g_assert_not_reached();
685 static void gen_op_fpackfix(TCGv_i32 dst
, TCGv_i64 src
)
687 #ifdef TARGET_SPARC64
688 gen_helper_fpackfix(dst
, cpu_gsr
, src
);
690 g_assert_not_reached();
694 static void gen_op_fpack32(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
696 #ifdef TARGET_SPARC64
697 gen_helper_fpack32(dst
, cpu_gsr
, src1
, src2
);
699 g_assert_not_reached();
703 static void gen_op_faligndata(TCGv_i64 dst
, TCGv_i64 s1
, TCGv_i64 s2
)
705 #ifdef TARGET_SPARC64
710 shift
= tcg_temp_new();
712 tcg_gen_andi_tl(shift
, cpu_gsr
, 7);
713 tcg_gen_shli_tl(shift
, shift
, 3);
714 tcg_gen_shl_tl(t1
, s1
, shift
);
717 * A shift of 64 does not produce 0 in TCG. Divide this into a
718 * shift of (up to 63) followed by a constant shift of 1.
720 tcg_gen_xori_tl(shift
, shift
, 63);
721 tcg_gen_shr_tl(t2
, s2
, shift
);
722 tcg_gen_shri_tl(t2
, t2
, 1);
724 tcg_gen_or_tl(dst
, t1
, t2
);
726 g_assert_not_reached();
730 static void gen_op_bshuffle(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
732 #ifdef TARGET_SPARC64
733 gen_helper_bshuffle(dst
, cpu_gsr
, src1
, src2
);
735 g_assert_not_reached();
740 static void gen_op_eval_ba(TCGv dst
)
742 tcg_gen_movi_tl(dst
, 1);
746 static void gen_op_eval_bn(TCGv dst
)
748 tcg_gen_movi_tl(dst
, 0);
752 FPSR bit field FCC1 | FCC0:
758 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
759 unsigned int fcc_offset
)
761 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
762 tcg_gen_andi_tl(reg
, reg
, 0x1);
765 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
767 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
768 tcg_gen_andi_tl(reg
, reg
, 0x1);
772 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
774 TCGv t0
= tcg_temp_new();
775 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
776 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
777 tcg_gen_or_tl(dst
, dst
, t0
);
780 // 1 or 2: FCC0 ^ FCC1
781 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
783 TCGv t0
= tcg_temp_new();
784 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
785 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
786 tcg_gen_xor_tl(dst
, dst
, t0
);
790 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
792 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
796 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
798 TCGv t0
= tcg_temp_new();
799 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
800 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
801 tcg_gen_andc_tl(dst
, dst
, t0
);
805 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
807 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
811 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
813 TCGv t0
= tcg_temp_new();
814 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
815 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
816 tcg_gen_andc_tl(dst
, t0
, dst
);
820 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
822 TCGv t0
= tcg_temp_new();
823 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
824 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
825 tcg_gen_and_tl(dst
, dst
, t0
);
829 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
831 TCGv t0
= tcg_temp_new();
832 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
833 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
834 tcg_gen_or_tl(dst
, dst
, t0
);
835 tcg_gen_xori_tl(dst
, dst
, 0x1);
838 // 0 or 3: !(FCC0 ^ FCC1)
839 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
841 TCGv t0
= tcg_temp_new();
842 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
843 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
844 tcg_gen_xor_tl(dst
, dst
, t0
);
845 tcg_gen_xori_tl(dst
, dst
, 0x1);
849 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
851 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
852 tcg_gen_xori_tl(dst
, dst
, 0x1);
855 // !1: !(FCC0 & !FCC1)
856 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
858 TCGv t0
= tcg_temp_new();
859 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
860 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
861 tcg_gen_andc_tl(dst
, dst
, t0
);
862 tcg_gen_xori_tl(dst
, dst
, 0x1);
866 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
868 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
869 tcg_gen_xori_tl(dst
, dst
, 0x1);
872 // !2: !(!FCC0 & FCC1)
873 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
875 TCGv t0
= tcg_temp_new();
876 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
877 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
878 tcg_gen_andc_tl(dst
, t0
, dst
);
879 tcg_gen_xori_tl(dst
, dst
, 0x1);
882 // !3: !(FCC0 & FCC1)
883 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
885 TCGv t0
= tcg_temp_new();
886 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
887 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
888 tcg_gen_and_tl(dst
, dst
, t0
);
889 tcg_gen_xori_tl(dst
, dst
, 0x1);
892 static void finishing_insn(DisasContext
*dc
)
895 * From here, there is no future path through an unwinding exception.
896 * If the current insn cannot raise an exception, the computation of
897 * cpu_cond may be able to be elided.
899 if (dc
->cpu_cond_live
) {
900 tcg_gen_discard_tl(cpu_cond
);
901 dc
->cpu_cond_live
= false;
905 static void gen_generic_branch(DisasContext
*dc
)
907 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
908 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
909 TCGv c2
= tcg_constant_tl(dc
->jump
.c2
);
911 tcg_gen_movcond_tl(dc
->jump
.cond
, cpu_npc
, dc
->jump
.c1
, c2
, npc0
, npc1
);
914 /* call this function before using the condition register as it may
915 have been set for a jump */
916 static void flush_cond(DisasContext
*dc
)
918 if (dc
->npc
== JUMP_PC
) {
919 gen_generic_branch(dc
);
920 dc
->npc
= DYNAMIC_PC_LOOKUP
;
924 static void save_npc(DisasContext
*dc
)
929 gen_generic_branch(dc
);
930 dc
->npc
= DYNAMIC_PC_LOOKUP
;
933 case DYNAMIC_PC_LOOKUP
:
936 g_assert_not_reached();
939 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
943 static void save_state(DisasContext
*dc
)
945 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
949 static void gen_exception(DisasContext
*dc
, int which
)
953 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
954 dc
->base
.is_jmp
= DISAS_NORETURN
;
957 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
959 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
961 e
->next
= dc
->delay_excp_list
;
962 dc
->delay_excp_list
= e
;
964 e
->lab
= gen_new_label();
967 /* Caller must have used flush_cond before branch. */
968 assert(e
->npc
!= JUMP_PC
);
974 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
976 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
979 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
981 TCGv t
= tcg_temp_new();
984 tcg_gen_andi_tl(t
, addr
, mask
);
987 lab
= delay_exception(dc
, TT_UNALIGNED
);
988 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
991 static void gen_mov_pc_npc(DisasContext
*dc
)
998 gen_generic_branch(dc
);
999 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1000 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1003 case DYNAMIC_PC_LOOKUP
:
1004 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1008 g_assert_not_reached();
1015 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1020 cmp
->c1
= t1
= tcg_temp_new();
1024 case 0x0: /* never */
1025 cmp
->cond
= TCG_COND_NEVER
;
1026 cmp
->c1
= tcg_constant_tl(0);
1029 case 0x1: /* eq: Z */
1030 cmp
->cond
= TCG_COND_EQ
;
1031 if (TARGET_LONG_BITS
== 32 || xcc
) {
1032 tcg_gen_mov_tl(t1
, cpu_cc_Z
);
1034 tcg_gen_ext32u_tl(t1
, cpu_icc_Z
);
1038 case 0x2: /* le: Z | (N ^ V) */
1041 * cc_Z || (N ^ V) < 0 NE
1042 * cc_Z && !((N ^ V) < 0) EQ
1043 * cc_Z & ~((N ^ V) >> TLB) EQ
1045 cmp
->cond
= TCG_COND_EQ
;
1046 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
1047 tcg_gen_sextract_tl(t1
, t1
, xcc
? 63 : 31, 1);
1048 tcg_gen_andc_tl(t1
, xcc
? cpu_cc_Z
: cpu_icc_Z
, t1
);
1049 if (TARGET_LONG_BITS
== 64 && !xcc
) {
1050 tcg_gen_ext32u_tl(t1
, t1
);
1054 case 0x3: /* lt: N ^ V */
1055 cmp
->cond
= TCG_COND_LT
;
1056 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
1057 if (TARGET_LONG_BITS
== 64 && !xcc
) {
1058 tcg_gen_ext32s_tl(t1
, t1
);
1062 case 0x4: /* leu: Z | C */
1065 * cc_Z == 0 || cc_C != 0 NE
1066 * cc_Z != 0 && cc_C == 0 EQ
1067 * cc_Z & (cc_C ? 0 : -1) EQ
1068 * cc_Z & (cc_C - 1) EQ
1070 cmp
->cond
= TCG_COND_EQ
;
1071 if (TARGET_LONG_BITS
== 32 || xcc
) {
1072 tcg_gen_subi_tl(t1
, cpu_cc_C
, 1);
1073 tcg_gen_and_tl(t1
, t1
, cpu_cc_Z
);
1075 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
1076 tcg_gen_subi_tl(t1
, t1
, 1);
1077 tcg_gen_and_tl(t1
, t1
, cpu_icc_Z
);
1078 tcg_gen_ext32u_tl(t1
, t1
);
1082 case 0x5: /* ltu: C */
1083 cmp
->cond
= TCG_COND_NE
;
1084 if (TARGET_LONG_BITS
== 32 || xcc
) {
1085 tcg_gen_mov_tl(t1
, cpu_cc_C
);
1087 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
1091 case 0x6: /* neg: N */
1092 cmp
->cond
= TCG_COND_LT
;
1093 if (TARGET_LONG_BITS
== 32 || xcc
) {
1094 tcg_gen_mov_tl(t1
, cpu_cc_N
);
1096 tcg_gen_ext32s_tl(t1
, cpu_cc_N
);
1100 case 0x7: /* vs: V */
1101 cmp
->cond
= TCG_COND_LT
;
1102 if (TARGET_LONG_BITS
== 32 || xcc
) {
1103 tcg_gen_mov_tl(t1
, cpu_cc_V
);
1105 tcg_gen_ext32s_tl(t1
, cpu_cc_V
);
1110 cmp
->cond
= tcg_invert_cond(cmp
->cond
);
1114 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1116 unsigned int offset
;
1119 /* For now we still generate a straight boolean result. */
1120 cmp
->cond
= TCG_COND_NE
;
1121 cmp
->c1
= r_dst
= tcg_temp_new();
1142 gen_op_eval_bn(r_dst
);
1145 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1148 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1151 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1154 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1157 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1160 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1163 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1166 gen_op_eval_ba(r_dst
);
1169 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1172 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1175 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1178 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1181 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1184 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1187 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1192 static bool gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1194 static const TCGCond cond_reg
[4] = {
1195 TCG_COND_NEVER
, /* reserved */
1202 if ((cond
& 3) == 0) {
1205 tcond
= cond_reg
[cond
& 3];
1207 tcond
= tcg_invert_cond(tcond
);
1211 cmp
->c1
= tcg_temp_new();
1213 tcg_gen_mov_tl(cmp
->c1
, r_src
);
1217 static void gen_op_clear_ieee_excp_and_FTT(void)
1219 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1222 static void gen_op_fmovs(TCGv_i32 dst
, TCGv_i32 src
)
1224 gen_op_clear_ieee_excp_and_FTT();
1225 tcg_gen_mov_i32(dst
, src
);
1228 static void gen_op_fnegs(TCGv_i32 dst
, TCGv_i32 src
)
1230 gen_op_clear_ieee_excp_and_FTT();
1231 gen_helper_fnegs(dst
, src
);
1234 static void gen_op_fabss(TCGv_i32 dst
, TCGv_i32 src
)
1236 gen_op_clear_ieee_excp_and_FTT();
1237 gen_helper_fabss(dst
, src
);
1240 static void gen_op_fmovd(TCGv_i64 dst
, TCGv_i64 src
)
1242 gen_op_clear_ieee_excp_and_FTT();
1243 tcg_gen_mov_i64(dst
, src
);
1246 static void gen_op_fnegd(TCGv_i64 dst
, TCGv_i64 src
)
1248 gen_op_clear_ieee_excp_and_FTT();
1249 gen_helper_fnegd(dst
, src
);
1252 static void gen_op_fabsd(TCGv_i64 dst
, TCGv_i64 src
)
1254 gen_op_clear_ieee_excp_and_FTT();
1255 gen_helper_fabsd(dst
, src
);
1258 #ifdef TARGET_SPARC64
1259 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1263 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1266 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1269 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1272 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1277 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1281 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1284 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1287 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1290 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1295 static void gen_op_fcmpq(int fccno
)
1299 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1302 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1305 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1308 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1313 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1317 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1320 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1323 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1326 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1331 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1335 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1338 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1341 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1344 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1349 static void gen_op_fcmpeq(int fccno
)
1353 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1356 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1359 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1362 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1369 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1371 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1374 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1376 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1379 static void gen_op_fcmpq(int fccno
)
1381 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1384 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1386 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1389 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1391 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1394 static void gen_op_fcmpeq(int fccno
)
1396 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1400 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1402 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1403 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1404 gen_exception(dc
, TT_FP_EXCP
);
1407 static int gen_trap_ifnofpu(DisasContext
*dc
)
1409 #if !defined(CONFIG_USER_ONLY)
1410 if (!dc
->fpu_enabled
) {
1411 gen_exception(dc
, TT_NFPU_INSN
);
1439 * For asi == -1, treat as non-asi.
1440 * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1442 static DisasASI
resolve_asi(DisasContext
*dc
, int asi
, MemOp memop
)
1444 ASIType type
= GET_ASI_HELPER
;
1445 int mem_idx
= dc
->mem_idx
;
1448 /* Artificial "non-asi" case. */
1449 type
= GET_ASI_DIRECT
;
1453 #ifndef TARGET_SPARC64
1454 /* Before v9, all asis are immediate and privileged. */
1456 gen_exception(dc
, TT_ILL_INSN
);
1457 type
= GET_ASI_EXCP
;
1458 } else if (supervisor(dc
)
1459 /* Note that LEON accepts ASI_USERDATA in user mode, for
1460 use with CASA. Also note that previous versions of
1461 QEMU allowed (and old versions of gcc emitted) ASI_P
1462 for LEON, which is incorrect. */
1463 || (asi
== ASI_USERDATA
1464 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1466 case ASI_USERDATA
: /* User data access */
1467 mem_idx
= MMU_USER_IDX
;
1468 type
= GET_ASI_DIRECT
;
1470 case ASI_KERNELDATA
: /* Supervisor data access */
1471 mem_idx
= MMU_KERNEL_IDX
;
1472 type
= GET_ASI_DIRECT
;
1474 case ASI_M_BYPASS
: /* MMU passthrough */
1475 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1476 mem_idx
= MMU_PHYS_IDX
;
1477 type
= GET_ASI_DIRECT
;
1479 case ASI_M_BCOPY
: /* Block copy, sta access */
1480 mem_idx
= MMU_KERNEL_IDX
;
1481 type
= GET_ASI_BCOPY
;
1483 case ASI_M_BFILL
: /* Block fill, stda access */
1484 mem_idx
= MMU_KERNEL_IDX
;
1485 type
= GET_ASI_BFILL
;
1489 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1490 * permissions check in get_physical_address(..).
1492 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1494 gen_exception(dc
, TT_PRIV_INSN
);
1495 type
= GET_ASI_EXCP
;
1501 /* With v9, all asis below 0x80 are privileged. */
1502 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1503 down that bit into DisasContext. For the moment that's ok,
1504 since the direct implementations below doesn't have any ASIs
1505 in the restricted [0x30, 0x7f] range, and the check will be
1506 done properly in the helper. */
1507 if (!supervisor(dc
) && asi
< 0x80) {
1508 gen_exception(dc
, TT_PRIV_ACT
);
1509 type
= GET_ASI_EXCP
;
1512 case ASI_REAL
: /* Bypass */
1513 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1514 case ASI_REAL_L
: /* Bypass LE */
1515 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1516 case ASI_TWINX_REAL
: /* Real address, twinx */
1517 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1518 case ASI_QUAD_LDD_PHYS
:
1519 case ASI_QUAD_LDD_PHYS_L
:
1520 mem_idx
= MMU_PHYS_IDX
;
1522 case ASI_N
: /* Nucleus */
1523 case ASI_NL
: /* Nucleus LE */
1526 case ASI_NUCLEUS_QUAD_LDD
:
1527 case ASI_NUCLEUS_QUAD_LDD_L
:
1528 if (hypervisor(dc
)) {
1529 mem_idx
= MMU_PHYS_IDX
;
1531 mem_idx
= MMU_NUCLEUS_IDX
;
1534 case ASI_AIUP
: /* As if user primary */
1535 case ASI_AIUPL
: /* As if user primary LE */
1536 case ASI_TWINX_AIUP
:
1537 case ASI_TWINX_AIUP_L
:
1538 case ASI_BLK_AIUP_4V
:
1539 case ASI_BLK_AIUP_L_4V
:
1542 mem_idx
= MMU_USER_IDX
;
1544 case ASI_AIUS
: /* As if user secondary */
1545 case ASI_AIUSL
: /* As if user secondary LE */
1546 case ASI_TWINX_AIUS
:
1547 case ASI_TWINX_AIUS_L
:
1548 case ASI_BLK_AIUS_4V
:
1549 case ASI_BLK_AIUS_L_4V
:
1552 mem_idx
= MMU_USER_SECONDARY_IDX
;
1554 case ASI_S
: /* Secondary */
1555 case ASI_SL
: /* Secondary LE */
1558 case ASI_BLK_COMMIT_S
:
1565 if (mem_idx
== MMU_USER_IDX
) {
1566 mem_idx
= MMU_USER_SECONDARY_IDX
;
1567 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1568 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1571 case ASI_P
: /* Primary */
1572 case ASI_PL
: /* Primary LE */
1575 case ASI_BLK_COMMIT_P
:
1599 type
= GET_ASI_DIRECT
;
1601 case ASI_TWINX_REAL
:
1602 case ASI_TWINX_REAL_L
:
1605 case ASI_TWINX_AIUP
:
1606 case ASI_TWINX_AIUP_L
:
1607 case ASI_TWINX_AIUS
:
1608 case ASI_TWINX_AIUS_L
:
1613 case ASI_QUAD_LDD_PHYS
:
1614 case ASI_QUAD_LDD_PHYS_L
:
1615 case ASI_NUCLEUS_QUAD_LDD
:
1616 case ASI_NUCLEUS_QUAD_LDD_L
:
1617 type
= GET_ASI_DTWINX
;
1619 case ASI_BLK_COMMIT_P
:
1620 case ASI_BLK_COMMIT_S
:
1621 case ASI_BLK_AIUP_4V
:
1622 case ASI_BLK_AIUP_L_4V
:
1625 case ASI_BLK_AIUS_4V
:
1626 case ASI_BLK_AIUS_L_4V
:
1633 type
= GET_ASI_BLOCK
;
1640 type
= GET_ASI_SHORT
;
1647 type
= GET_ASI_SHORT
;
1650 /* The little-endian asis all have bit 3 set. */
1658 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs: on 32-bit user-only builds every ASI resolves to GET_ASI_DIRECT
 * or GET_ASI_EXCP, so the helper paths are unreachable.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1675 static void gen_ld_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1680 case GET_ASI_DTWINX
: /* Reserved for ldda. */
1681 gen_exception(dc
, TT_ILL_INSN
);
1683 case GET_ASI_DIRECT
:
1684 tcg_gen_qemu_ld_tl(dst
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1688 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1689 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1692 #ifdef TARGET_SPARC64
1693 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
1696 TCGv_i64 t64
= tcg_temp_new_i64();
1697 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1698 tcg_gen_trunc_i64_tl(dst
, t64
);
1706 static void gen_st_asi(DisasContext
*dc
, DisasASI
*da
, TCGv src
, TCGv addr
)
1712 case GET_ASI_DTWINX
: /* Reserved for stda. */
1713 if (TARGET_LONG_BITS
== 32) {
1714 gen_exception(dc
, TT_ILL_INSN
);
1716 } else if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
1717 /* Pre OpenSPARC CPUs don't have these */
1718 gen_exception(dc
, TT_ILL_INSN
);
1721 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1724 case GET_ASI_DIRECT
:
1725 tcg_gen_qemu_st_tl(src
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1729 assert(TARGET_LONG_BITS
== 32);
1730 /* Copy 32 bytes from the address in SRC to ADDR. */
1731 /* ??? The original qemu code suggests 4-byte alignment, dropping
1732 the low bits, but the only place I can see this used is in the
1733 Linux kernel with 32 byte alignment, which would make more sense
1734 as a cacheline-style operation. */
1736 TCGv saddr
= tcg_temp_new();
1737 TCGv daddr
= tcg_temp_new();
1738 TCGv four
= tcg_constant_tl(4);
1739 TCGv_i32 tmp
= tcg_temp_new_i32();
1742 tcg_gen_andi_tl(saddr
, src
, -4);
1743 tcg_gen_andi_tl(daddr
, addr
, -4);
1744 for (i
= 0; i
< 32; i
+= 4) {
1745 /* Since the loads and stores are paired, allow the
1746 copy to happen in the host endianness. */
1747 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
->mem_idx
, MO_UL
);
1748 tcg_gen_qemu_st_i32(tmp
, daddr
, da
->mem_idx
, MO_UL
);
1749 tcg_gen_add_tl(saddr
, saddr
, four
);
1750 tcg_gen_add_tl(daddr
, daddr
, four
);
1757 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1758 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1761 #ifdef TARGET_SPARC64
1762 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
1765 TCGv_i64 t64
= tcg_temp_new_i64();
1766 tcg_gen_extu_tl_i64(t64
, src
);
1767 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
1771 /* A write to a TLB register may alter page maps. End the TB. */
1772 dc
->npc
= DYNAMIC_PC
;
1778 static void gen_swap_asi(DisasContext
*dc
, DisasASI
*da
,
1779 TCGv dst
, TCGv src
, TCGv addr
)
1784 case GET_ASI_DIRECT
:
1785 tcg_gen_atomic_xchg_tl(dst
, addr
, src
,
1786 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1789 /* ??? Should be DAE_invalid_asi. */
1790 gen_exception(dc
, TT_DATA_ACCESS
);
1795 static void gen_cas_asi(DisasContext
*dc
, DisasASI
*da
,
1796 TCGv oldv
, TCGv newv
, TCGv cmpv
, TCGv addr
)
1801 case GET_ASI_DIRECT
:
1802 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, newv
,
1803 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1806 /* ??? Should be DAE_invalid_asi. */
1807 gen_exception(dc
, TT_DATA_ACCESS
);
1812 static void gen_ldstub_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1817 case GET_ASI_DIRECT
:
1818 tcg_gen_atomic_xchg_tl(dst
, addr
, tcg_constant_tl(0xff),
1819 da
->mem_idx
, MO_UB
);
1822 /* ??? In theory, this should be raise DAE_invalid_asi.
1823 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1824 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
1825 gen_helper_exit_atomic(tcg_env
);
1827 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1828 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
1832 t64
= tcg_temp_new_i64();
1833 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1835 s64
= tcg_constant_i64(0xff);
1836 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
1838 tcg_gen_trunc_i64_tl(dst
, t64
);
1841 dc
->npc
= DYNAMIC_PC
;
1847 static void gen_ldf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1850 MemOp memop
= da
->memop
;
1851 MemOp size
= memop
& MO_SIZE
;
1856 /* TODO: Use 128-bit load/store below. */
1857 if (size
== MO_128
) {
1858 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1865 case GET_ASI_DIRECT
:
1866 memop
|= MO_ALIGN_4
;
1869 d32
= gen_dest_fpr_F(dc
);
1870 tcg_gen_qemu_ld_i32(d32
, addr
, da
->mem_idx
, memop
);
1871 gen_store_fpr_F(dc
, rd
, d32
);
1875 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
, memop
);
1879 d64
= tcg_temp_new_i64();
1880 tcg_gen_qemu_ld_i64(d64
, addr
, da
->mem_idx
, memop
);
1881 addr_tmp
= tcg_temp_new();
1882 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1883 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
1884 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1887 g_assert_not_reached();
1892 /* Valid for lddfa on aligned registers only. */
1893 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
1894 /* The first operation checks required alignment. */
1895 addr_tmp
= tcg_temp_new();
1896 for (int i
= 0; ; ++i
) {
1897 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
1898 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
1902 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1906 gen_exception(dc
, TT_ILL_INSN
);
1911 /* Valid for lddfa only. */
1912 if (orig_size
== MO_64
) {
1913 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1916 gen_exception(dc
, TT_ILL_INSN
);
1922 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1923 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
1926 /* According to the table in the UA2011 manual, the only
1927 other asis that are valid for ldfa/lddfa/ldqfa are
1928 the NO_FAULT asis. We still need a helper for these,
1929 but we can just use the integer asi helper for them. */
1932 d64
= tcg_temp_new_i64();
1933 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1934 d32
= gen_dest_fpr_F(dc
);
1935 tcg_gen_extrl_i64_i32(d32
, d64
);
1936 gen_store_fpr_F(dc
, rd
, d32
);
1939 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
,
1943 d64
= tcg_temp_new_i64();
1944 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1945 addr_tmp
= tcg_temp_new();
1946 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1947 gen_helper_ld_asi(cpu_fpr
[rd
/ 2 + 1], tcg_env
, addr_tmp
,
1949 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1952 g_assert_not_reached();
1959 static void gen_stf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1962 MemOp memop
= da
->memop
;
1963 MemOp size
= memop
& MO_SIZE
;
1967 /* TODO: Use 128-bit load/store below. */
1968 if (size
== MO_128
) {
1969 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1976 case GET_ASI_DIRECT
:
1977 memop
|= MO_ALIGN_4
;
1980 d32
= gen_load_fpr_F(dc
, rd
);
1981 tcg_gen_qemu_st_i32(d32
, addr
, da
->mem_idx
, memop
| MO_ALIGN
);
1984 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1985 memop
| MO_ALIGN_4
);
1988 /* Only 4-byte alignment required. However, it is legal for the
1989 cpu to signal the alignment fault, and the OS trap handler is
1990 required to fix it up. Requiring 16-byte alignment here avoids
1991 having to probe the second page before performing the first
1993 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1994 memop
| MO_ALIGN_16
);
1995 addr_tmp
= tcg_temp_new();
1996 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1997 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
2000 g_assert_not_reached();
2005 /* Valid for stdfa on aligned registers only. */
2006 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2007 /* The first operation checks required alignment. */
2008 addr_tmp
= tcg_temp_new();
2009 for (int i
= 0; ; ++i
) {
2010 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2011 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2015 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2019 gen_exception(dc
, TT_ILL_INSN
);
2024 /* Valid for stdfa only. */
2025 if (orig_size
== MO_64
) {
2026 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2029 gen_exception(dc
, TT_ILL_INSN
);
2034 /* According to the table in the UA2011 manual, the only
2035 other asis that are valid for ldfa/lddfa/ldqfa are
2036 the PST* asis, which aren't currently handled. */
2037 gen_exception(dc
, TT_ILL_INSN
);
2042 static void gen_ldda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2044 TCGv hi
= gen_dest_gpr(dc
, rd
);
2045 TCGv lo
= gen_dest_gpr(dc
, rd
+ 1);
2051 case GET_ASI_DTWINX
:
2052 #ifdef TARGET_SPARC64
2054 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2055 TCGv_i128 t
= tcg_temp_new_i128();
2057 tcg_gen_qemu_ld_i128(t
, addr
, da
->mem_idx
, mop
);
2059 * Note that LE twinx acts as if each 64-bit register result is
2060 * byte swapped. We perform one 128-bit LE load, so must swap
2061 * the order of the writebacks.
2063 if ((mop
& MO_BSWAP
) == MO_TE
) {
2064 tcg_gen_extr_i128_i64(lo
, hi
, t
);
2066 tcg_gen_extr_i128_i64(hi
, lo
, t
);
2071 g_assert_not_reached();
2074 case GET_ASI_DIRECT
:
2076 TCGv_i64 tmp
= tcg_temp_new_i64();
2078 tcg_gen_qemu_ld_i64(tmp
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2080 /* Note that LE ldda acts as if each 32-bit register
2081 result is byte swapped. Having just performed one
2082 64-bit bswap, we need now to swap the writebacks. */
2083 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2084 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2086 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2092 /* ??? In theory we've handled all of the ASIs that are valid
2093 for ldda, and this should raise DAE_invalid_asi. However,
2094 real hardware allows others. This can be seen with e.g.
2095 FreeBSD 10.3 wrt ASI_IC_TAG. */
2097 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2098 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2099 TCGv_i64 tmp
= tcg_temp_new_i64();
2102 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2105 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2106 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2108 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2114 gen_store_gpr(dc
, rd
, hi
);
2115 gen_store_gpr(dc
, rd
+ 1, lo
);
2118 static void gen_stda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2120 TCGv hi
= gen_load_gpr(dc
, rd
);
2121 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2127 case GET_ASI_DTWINX
:
2128 #ifdef TARGET_SPARC64
2130 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2131 TCGv_i128 t
= tcg_temp_new_i128();
2134 * Note that LE twinx acts as if each 64-bit register result is
2135 * byte swapped. We perform one 128-bit LE store, so must swap
2136 * the order of the construction.
2138 if ((mop
& MO_BSWAP
) == MO_TE
) {
2139 tcg_gen_concat_i64_i128(t
, lo
, hi
);
2141 tcg_gen_concat_i64_i128(t
, hi
, lo
);
2143 tcg_gen_qemu_st_i128(t
, addr
, da
->mem_idx
, mop
);
2147 g_assert_not_reached();
2150 case GET_ASI_DIRECT
:
2152 TCGv_i64 t64
= tcg_temp_new_i64();
2154 /* Note that LE stda acts as if each 32-bit register result is
2155 byte swapped. We will perform one 64-bit LE store, so now
2156 we must swap the order of the construction. */
2157 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2158 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2160 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2162 tcg_gen_qemu_st_i64(t64
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2167 assert(TARGET_LONG_BITS
== 32);
2168 /* Store 32 bytes of T64 to ADDR. */
2169 /* ??? The original qemu code suggests 8-byte alignment, dropping
2170 the low bits, but the only place I can see this used is in the
2171 Linux kernel with 32 byte alignment, which would make more sense
2172 as a cacheline-style operation. */
2174 TCGv_i64 t64
= tcg_temp_new_i64();
2175 TCGv d_addr
= tcg_temp_new();
2176 TCGv eight
= tcg_constant_tl(8);
2179 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2180 tcg_gen_andi_tl(d_addr
, addr
, -8);
2181 for (i
= 0; i
< 32; i
+= 8) {
2182 tcg_gen_qemu_st_i64(t64
, d_addr
, da
->mem_idx
, da
->memop
);
2183 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2189 /* ??? In theory we've handled all of the ASIs that are valid
2190 for stda, and this should raise DAE_invalid_asi. */
2192 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2193 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2194 TCGv_i64 t64
= tcg_temp_new_i64();
2197 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2198 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2200 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2204 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2210 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2212 #ifdef TARGET_SPARC64
2213 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2214 TCGv_i64 c64
= tcg_temp_new_i64();
2216 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2217 or fold the comparison down to 32 bits and use movcond_i32. Choose
2219 c32
= tcg_temp_new_i32();
2220 tcg_gen_setcondi_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2221 tcg_gen_extrl_i64_i32(c32
, c64
);
2223 s1
= gen_load_fpr_F(dc
, rs
);
2224 s2
= gen_load_fpr_F(dc
, rd
);
2225 dst
= gen_dest_fpr_F(dc
);
2226 zero
= tcg_constant_i32(0);
2228 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2230 gen_store_fpr_F(dc
, rd
, dst
);
2232 qemu_build_not_reached();
2236 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2238 #ifdef TARGET_SPARC64
2239 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2240 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2241 gen_load_fpr_D(dc
, rs
),
2242 gen_load_fpr_D(dc
, rd
));
2243 gen_store_fpr_D(dc
, rd
, dst
);
2245 qemu_build_not_reached();
2249 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2251 #ifdef TARGET_SPARC64
2252 int qd
= QFPREG(rd
);
2253 int qs
= QFPREG(rs
);
2254 TCGv c2
= tcg_constant_tl(cmp
->c2
);
2256 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, c2
,
2257 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2258 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, c2
,
2259 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2261 gen_update_fprs_dirty(dc
, qd
);
2263 qemu_build_not_reached();
2267 #ifdef TARGET_SPARC64
2268 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2270 TCGv_i32 r_tl
= tcg_temp_new_i32();
2272 /* load env->tl into r_tl */
2273 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2275 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2276 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2278 /* calculate offset to current trap state from env->ts, reuse r_tl */
2279 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2280 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2282 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2284 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2285 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2286 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2291 static int extract_dfpreg(DisasContext
*dc
, int x
)
2296 static int extract_qfpreg(DisasContext
*dc
, int x
)
2301 /* Include the auto-generated decoder. */
2302 #include "decode-insns.c.inc"
2304 #define TRANS(NAME, AVAIL, FUNC, ...) \
2305 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2306 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2335 /* Default case for non jump instructions. */
2336 static bool advance_pc(DisasContext
*dc
)
2345 case DYNAMIC_PC_LOOKUP
:
2347 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2348 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2352 /* we can do a static jump */
2353 l1
= gen_new_label();
2354 tcg_gen_brcondi_tl(dc
->jump
.cond
, dc
->jump
.c1
, dc
->jump
.c2
, l1
);
2356 /* jump not taken */
2357 gen_goto_tb(dc
, 1, dc
->jump_pc
[1], dc
->jump_pc
[1] + 4);
2361 gen_goto_tb(dc
, 0, dc
->jump_pc
[0], dc
->jump_pc
[0] + 4);
2363 dc
->base
.is_jmp
= DISAS_NORETURN
;
2367 g_assert_not_reached();
2371 dc
->npc
= dc
->npc
+ 4;
/*
 * Major opcodes 00 and 01 -- branches, call, and sethi
 */
2380 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2381 bool annul
, int disp
)
2383 target_ulong dest
= address_mask_i(dc
, dc
->pc
+ disp
* 4);
2388 if (cmp
->cond
== TCG_COND_ALWAYS
) {
2399 if (cmp
->cond
== TCG_COND_NEVER
) {
2404 tcg_gen_addi_tl(cpu_pc
, cpu_pc
, 4);
2406 tcg_gen_addi_tl(cpu_npc
, cpu_pc
, 4);
2408 dc
->pc
= npc
+ (annul
? 4 : 0);
2409 dc
->npc
= dc
->pc
+ 4;
2418 TCGLabel
*l1
= gen_new_label();
2420 tcg_gen_brcondi_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2421 gen_goto_tb(dc
, 0, npc
, dest
);
2423 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2425 dc
->base
.is_jmp
= DISAS_NORETURN
;
2430 case DYNAMIC_PC_LOOKUP
:
2431 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2432 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2433 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2434 cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2435 tcg_constant_tl(dest
), cpu_npc
);
2439 g_assert_not_reached();
2445 dc
->jump_pc
[0] = dest
;
2446 dc
->jump_pc
[1] = npc
+ 4;
2448 /* The condition for cpu_cond is always NE -- normalize. */
2449 if (cmp
->cond
== TCG_COND_NE
) {
2450 tcg_gen_xori_tl(cpu_cond
, cmp
->c1
, cmp
->c2
);
2452 tcg_gen_setcondi_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
2454 dc
->cpu_cond_live
= true;
2460 static bool raise_priv(DisasContext
*dc
)
2462 gen_exception(dc
, TT_PRIV_INSN
);
2466 static bool raise_unimpfpop(DisasContext
*dc
)
2468 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
2472 static bool gen_trap_float128(DisasContext
*dc
)
2474 if (dc
->def
->features
& CPU_FEATURE_FLOAT128
) {
2477 return raise_unimpfpop(dc
);
2480 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
2484 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
2485 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2488 TRANS(Bicc
, ALL
, do_bpcc
, a
)
2489 TRANS(BPcc
, 64, do_bpcc
, a
)
2491 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
2495 if (gen_trap_ifnofpu(dc
)) {
2498 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
2499 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2502 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
2503 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
2505 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
2509 if (!avail_64(dc
)) {
2512 if (!gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
))) {
2515 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2518 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
2520 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2522 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
2528 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
2531 * For sparc32, always generate the no-coprocessor exception.
2532 * For sparc64, always generate illegal instruction.
2534 #ifdef TARGET_SPARC64
2537 gen_exception(dc
, TT_NCP_INSN
);
2542 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
2544 /* Special-case %g0 because that's the canonical nop. */
2546 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
2548 return advance_pc(dc
);
/*
 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
 */
2555 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
2556 int rs1
, bool imm
, int rs2_or_imm
)
2558 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2559 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2566 return advance_pc(dc
);
2570 * Immediate traps are the most common case. Since this value is
2571 * live across the branch, it really pays to evaluate the constant.
2573 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
2574 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
2576 trap
= tcg_temp_new_i32();
2577 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
2579 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
2581 TCGv_i32 t2
= tcg_temp_new_i32();
2582 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
2583 tcg_gen_add_i32(trap
, trap
, t2
);
2585 tcg_gen_andi_i32(trap
, trap
, mask
);
2586 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2594 gen_helper_raise_exception(tcg_env
, trap
);
2595 dc
->base
.is_jmp
= DISAS_NORETURN
;
2599 /* Conditional trap. */
2601 lab
= delay_exceptionv(dc
, trap
);
2602 gen_compare(&cmp
, cc
, cond
, dc
);
2603 tcg_gen_brcondi_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
2605 return advance_pc(dc
);
2608 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
2610 if (avail_32(dc
) && a
->cc
) {
2613 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
2616 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
2621 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
2624 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
2629 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
2632 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
2634 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2635 return advance_pc(dc
);
2638 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
2644 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2645 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
2648 /* For #Sync, etc, end the TB to recognize interrupts. */
2649 dc
->base
.is_jmp
= DISAS_EXIT
;
2651 return advance_pc(dc
);
2654 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
2655 TCGv (*func
)(DisasContext
*, TCGv
))
2658 return raise_priv(dc
);
2660 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
2661 return advance_pc(dc
);
2664 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
2669 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
2672 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2673 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2674 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2676 if (avail_64(dc
) && a
->rs1
!= 0) {
2679 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
2682 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
2687 * TODO: There are many more fields to be filled,
2688 * some of which are writable.
2690 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
2691 val
|= 1 << 8; /* [8] V8 */
2693 return tcg_constant_tl(val
);
2696 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
2698 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
2700 gen_helper_rdccr(dst
, tcg_env
);
2704 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
2706 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
2708 #ifdef TARGET_SPARC64
2709 return tcg_constant_tl(dc
->asi
);
2711 qemu_build_not_reached();
2715 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
2717 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
2719 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2721 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2722 if (translator_io_start(&dc
->base
)) {
2723 dc
->base
.is_jmp
= DISAS_EXIT
;
2725 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2726 tcg_constant_i32(dc
->mem_idx
));
2730 /* TODO: non-priv access only allowed when enabled. */
2731 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
2733 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
2735 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
2738 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
2740 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
2742 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
2746 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
2748 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
2750 gen_trap_ifnofpu(dc
);
2754 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
2756 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
2758 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
2762 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
2764 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
2766 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
2770 /* TODO: non-priv access only allowed when enabled. */
2771 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
2773 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
2775 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2777 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
2778 if (translator_io_start(&dc
->base
)) {
2779 dc
->base
.is_jmp
= DISAS_EXIT
;
2781 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2782 tcg_constant_i32(dc
->mem_idx
));
2786 /* TODO: non-priv access only allowed when enabled. */
2787 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
2789 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
2791 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
2795 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2796 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
2799 * UltraSPARC-T1 Strand status.
2800 * HYPV check maybe not enough, UA2005 & UA2007 describe
2801 * this ASR as impl. dep
2803 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
2805 return tcg_constant_tl(1);
2808 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
2810 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
2812 gen_helper_rdpsr(dst
, tcg_env
);
2816 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
2818 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
2820 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
2824 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
2826 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
2828 TCGv_i32 tl
= tcg_temp_new_i32();
2829 TCGv_ptr tp
= tcg_temp_new_ptr();
2831 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
2832 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
2833 tcg_gen_shli_i32(tl
, tl
, 3);
2834 tcg_gen_ext_i32_ptr(tp
, tl
);
2835 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
2837 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
2841 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
2843 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
2845 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
2849 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
2851 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
2853 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
2857 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
2859 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
2861 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
2865 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
2867 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
2869 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
2873 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
2876 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
2878 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
2882 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
2884 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
2886 #ifdef TARGET_SPARC64
2887 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2889 gen_load_trap_state_at_tl(r_tsptr
);
2890 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
2893 qemu_build_not_reached();
2897 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
2899 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
2901 #ifdef TARGET_SPARC64
2902 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2904 gen_load_trap_state_at_tl(r_tsptr
);
2905 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
2908 qemu_build_not_reached();
2912 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
2914 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
2916 #ifdef TARGET_SPARC64
2917 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2919 gen_load_trap_state_at_tl(r_tsptr
);
2920 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
2923 qemu_build_not_reached();
2927 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
2929 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
2931 #ifdef TARGET_SPARC64
2932 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2934 gen_load_trap_state_at_tl(r_tsptr
);
2935 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
2938 qemu_build_not_reached();
2942 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
2943 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
2945 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
2950 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
2951 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
2953 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
2955 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
2959 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
2961 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
2963 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
2967 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
2969 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
2971 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
2975 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
2977 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
2979 gen_helper_rdcwp(dst
, tcg_env
);
2983 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
2985 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
2987 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
2991 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
2993 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
2995 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
2999 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
3002 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
3004 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
3008 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
3010 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
3012 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
3016 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
3018 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
3020 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
3024 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
3026 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
3028 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
3032 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
3034 /* UA2005 strand status */
3035 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
3037 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
3041 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
3043 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
3045 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
3049 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
3051 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
3054 gen_helper_flushw(tcg_env
);
3055 return advance_pc(dc
);
3060 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
3061 void (*func
)(DisasContext
*, TCGv
))
3065 /* For simplicity, we under-decoded the rs2 form. */
3066 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
3070 return raise_priv(dc
);
3073 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
3074 src
= tcg_constant_tl(a
->rs2_or_imm
);
3076 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3077 if (a
->rs2_or_imm
== 0) {
3080 src
= tcg_temp_new();
3082 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
3084 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
3089 return advance_pc(dc
);
3092 static void do_wry(DisasContext
*dc
, TCGv src
)
3094 tcg_gen_ext32u_tl(cpu_y
, src
);
3097 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
3099 static void do_wrccr(DisasContext
*dc
, TCGv src
)
3101 gen_helper_wrccr(tcg_env
, src
);
3104 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
3106 static void do_wrasi(DisasContext
*dc
, TCGv src
)
3108 TCGv tmp
= tcg_temp_new();
3110 tcg_gen_ext8u_tl(tmp
, src
);
3111 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
3112 /* End TB to notice changed ASI. */
3113 dc
->base
.is_jmp
= DISAS_EXIT
;
3116 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
3118 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
3120 #ifdef TARGET_SPARC64
3121 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
3123 dc
->base
.is_jmp
= DISAS_EXIT
;
3125 qemu_build_not_reached();
3129 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
3131 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
3133 gen_trap_ifnofpu(dc
);
3134 tcg_gen_mov_tl(cpu_gsr
, src
);
3137 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
3139 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
3141 gen_helper_set_softint(tcg_env
, src
);
3144 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
3146 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
3148 gen_helper_clear_softint(tcg_env
, src
);
3151 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
3153 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
3155 gen_helper_write_softint(tcg_env
, src
);
3158 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
3160 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
3162 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3164 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3165 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3166 translator_io_start(&dc
->base
);
3167 gen_helper_tick_set_limit(r_tickptr
, src
);
3168 /* End TB to handle timer interrupt */
3169 dc
->base
.is_jmp
= DISAS_EXIT
;
3172 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
3174 static void do_wrstick(DisasContext
*dc
, TCGv src
)
3176 #ifdef TARGET_SPARC64
3177 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3179 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
3180 translator_io_start(&dc
->base
);
3181 gen_helper_tick_set_count(r_tickptr
, src
);
3182 /* End TB to handle timer interrupt */
3183 dc
->base
.is_jmp
= DISAS_EXIT
;
3185 qemu_build_not_reached();
3189 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
3191 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
3193 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3195 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3196 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3197 translator_io_start(&dc
->base
);
3198 gen_helper_tick_set_limit(r_tickptr
, src
);
3199 /* End TB to handle timer interrupt */
3200 dc
->base
.is_jmp
= DISAS_EXIT
;
3203 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
3205 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
3209 gen_helper_power_down(tcg_env
);
3212 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
3214 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
3216 gen_helper_wrpsr(tcg_env
, src
);
3217 dc
->base
.is_jmp
= DISAS_EXIT
;
3220 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
3222 static void do_wrwim(DisasContext
*dc
, TCGv src
)
3224 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
3225 TCGv tmp
= tcg_temp_new();
3227 tcg_gen_andi_tl(tmp
, src
, mask
);
3228 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
3231 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
3233 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
3235 #ifdef TARGET_SPARC64
3236 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3238 gen_load_trap_state_at_tl(r_tsptr
);
3239 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
3241 qemu_build_not_reached();
3245 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
3247 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
3249 #ifdef TARGET_SPARC64
3250 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3252 gen_load_trap_state_at_tl(r_tsptr
);
3253 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
3255 qemu_build_not_reached();
3259 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
3261 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3263 #ifdef TARGET_SPARC64
3264 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3266 gen_load_trap_state_at_tl(r_tsptr
);
3267 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3269 qemu_build_not_reached();
3273 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3275 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3277 #ifdef TARGET_SPARC64
3278 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3280 gen_load_trap_state_at_tl(r_tsptr
);
3281 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3283 qemu_build_not_reached();
3287 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3289 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3291 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3293 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3294 translator_io_start(&dc
->base
);
3295 gen_helper_tick_set_count(r_tickptr
, src
);
3296 /* End TB to handle timer interrupt */
3297 dc
->base
.is_jmp
= DISAS_EXIT
;
3300 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3302 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3304 tcg_gen_mov_tl(cpu_tbr
, src
);
3307 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3309 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3312 if (translator_io_start(&dc
->base
)) {
3313 dc
->base
.is_jmp
= DISAS_EXIT
;
3315 gen_helper_wrpstate(tcg_env
, src
);
3316 dc
->npc
= DYNAMIC_PC
;
3319 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3321 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3324 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3325 dc
->npc
= DYNAMIC_PC
;
3328 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3330 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3332 if (translator_io_start(&dc
->base
)) {
3333 dc
->base
.is_jmp
= DISAS_EXIT
;
3335 gen_helper_wrpil(tcg_env
, src
);
3338 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3340 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3342 gen_helper_wrcwp(tcg_env
, src
);
3345 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3347 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3349 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3352 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3354 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3356 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3359 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3361 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3363 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3366 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3368 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3370 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3373 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3375 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3377 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3380 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
3382 static void do_wrgl(DisasContext
*dc
, TCGv src
)
3384 gen_helper_wrgl(tcg_env
, src
);
3387 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
3389 /* UA2005 strand status */
3390 static void do_wrssr(DisasContext
*dc
, TCGv src
)
3392 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
3395 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
3397 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3399 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
3401 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
3402 dc
->base
.is_jmp
= DISAS_EXIT
;
3405 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
3407 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
3409 TCGv_i32 tl
= tcg_temp_new_i32();
3410 TCGv_ptr tp
= tcg_temp_new_ptr();
3412 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3413 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3414 tcg_gen_shli_i32(tl
, tl
, 3);
3415 tcg_gen_ext_i32_ptr(tp
, tl
);
3416 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3418 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
3421 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
3423 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
3425 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
3428 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
3430 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
3432 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
3435 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
3437 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
3439 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3441 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3442 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
3443 translator_io_start(&dc
->base
);
3444 gen_helper_tick_set_limit(r_tickptr
, src
);
3445 /* End TB to handle timer interrupt */
3446 dc
->base
.is_jmp
= DISAS_EXIT
;
3449 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
3452 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
3454 if (!supervisor(dc
)) {
3455 return raise_priv(dc
);
3458 gen_helper_saved(tcg_env
);
3460 gen_helper_restored(tcg_env
);
3462 return advance_pc(dc
);
3465 TRANS(SAVED
, 64, do_saved_restored
, true)
3466 TRANS(RESTORED
, 64, do_saved_restored
, false)
3468 static bool trans_NOP(DisasContext
*dc
, arg_NOP
*a
)
3470 return advance_pc(dc
);
3474 * TODO: Need a feature bit for sparcv8.
3475 * In the meantime, treat all 32-bit cpus like sparcv7.
3477 TRANS(NOP_v7
, 32, trans_NOP
, a
)
3478 TRANS(NOP_v9
, 64, trans_NOP
, a
)
3480 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3481 void (*func
)(TCGv
, TCGv
, TCGv
),
3482 void (*funci
)(TCGv
, TCGv
, target_long
),
3487 /* For simplicity, we under-decoded the rs2 form. */
3488 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3495 dst
= gen_dest_gpr(dc
, a
->rd
);
3497 src1
= gen_load_gpr(dc
, a
->rs1
);
3499 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3501 funci(dst
, src1
, a
->rs2_or_imm
);
3503 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
3506 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
3510 if (TARGET_LONG_BITS
== 64) {
3511 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
3512 tcg_gen_movi_tl(cpu_icc_C
, 0);
3514 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
3515 tcg_gen_movi_tl(cpu_cc_C
, 0);
3516 tcg_gen_movi_tl(cpu_cc_V
, 0);
3519 gen_store_gpr(dc
, a
->rd
, dst
);
3520 return advance_pc(dc
);
3523 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3524 void (*func
)(TCGv
, TCGv
, TCGv
),
3525 void (*funci
)(TCGv
, TCGv
, target_long
),
3526 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
3529 return do_arith_int(dc
, a
, func_cc
, NULL
, false);
3531 return do_arith_int(dc
, a
, func
, funci
, false);
3534 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3535 void (*func
)(TCGv
, TCGv
, TCGv
),
3536 void (*funci
)(TCGv
, TCGv
, target_long
))
3538 return do_arith_int(dc
, a
, func
, funci
, a
->cc
);
3541 TRANS(ADD
, ALL
, do_arith
, a
, tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_addcc
)
3542 TRANS(SUB
, ALL
, do_arith
, a
, tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_subcc
)
3543 TRANS(ADDC
, ALL
, do_arith
, a
, gen_op_addc
, NULL
, gen_op_addccc
)
3544 TRANS(SUBC
, ALL
, do_arith
, a
, gen_op_subc
, NULL
, gen_op_subccc
)
3546 TRANS(TADDcc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_taddcc
)
3547 TRANS(TSUBcc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_tsubcc
)
3548 TRANS(TADDccTV
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_taddcctv
)
3549 TRANS(TSUBccTV
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_tsubcctv
)
3551 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
3552 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
3553 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
3554 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
3555 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
3557 TRANS(MULX
, 64, do_arith
, a
, tcg_gen_mul_tl
, tcg_gen_muli_tl
, NULL
)
3558 TRANS(UMUL
, MUL
, do_logic
, a
, gen_op_umul
, NULL
)
3559 TRANS(SMUL
, MUL
, do_logic
, a
, gen_op_smul
, NULL
)
3560 TRANS(MULScc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_mulscc
)
3562 TRANS(UDIVcc
, DIV
, do_arith
, a
, NULL
, NULL
, gen_op_udivcc
)
3563 TRANS(SDIV
, DIV
, do_arith
, a
, gen_op_sdiv
, NULL
, gen_op_sdivcc
)
3565 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3566 TRANS(POPC
, 64, do_arith
, a
, gen_op_popc
, NULL
, NULL
)
3568 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3570 /* OR with %g0 is the canonical alias for MOV. */
3571 if (!a
->cc
&& a
->rs1
== 0) {
3572 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3573 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
3574 } else if (a
->rs2_or_imm
& ~0x1f) {
3575 /* For simplicity, we under-decoded the rs2 form. */
3578 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
3580 return advance_pc(dc
);
3582 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
3585 static bool trans_UDIV(DisasContext
*dc
, arg_r_r_ri
*a
)
3590 if (!avail_DIV(dc
)) {
3593 /* For simplicity, we under-decoded the rs2 form. */
3594 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3598 if (unlikely(a
->rs2_or_imm
== 0)) {
3599 gen_exception(dc
, TT_DIV_ZERO
);
3604 t2
= tcg_constant_i64((uint32_t)a
->rs2_or_imm
);
3612 n2
= tcg_temp_new_i32();
3613 tcg_gen_trunc_tl_i32(n2
, cpu_regs
[a
->rs2_or_imm
]);
3615 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3616 tcg_gen_brcondi_i32(TCG_COND_EQ
, n2
, 0, lab
);
3618 t2
= tcg_temp_new_i64();
3619 #ifdef TARGET_SPARC64
3620 tcg_gen_ext32u_i64(t2
, cpu_regs
[a
->rs2_or_imm
]);
3622 tcg_gen_extu_i32_i64(t2
, cpu_regs
[a
->rs2_or_imm
]);
3626 t1
= tcg_temp_new_i64();
3627 tcg_gen_concat_tl_i64(t1
, gen_load_gpr(dc
, a
->rs1
), cpu_y
);
3629 tcg_gen_divu_i64(t1
, t1
, t2
);
3630 tcg_gen_umin_i64(t1
, t1
, tcg_constant_i64(UINT32_MAX
));
3632 dst
= gen_dest_gpr(dc
, a
->rd
);
3633 tcg_gen_trunc_i64_tl(dst
, t1
);
3634 gen_store_gpr(dc
, a
->rd
, dst
);
3635 return advance_pc(dc
);
3638 static bool trans_UDIVX(DisasContext
*dc
, arg_r_r_ri
*a
)
3640 TCGv dst
, src1
, src2
;
3642 if (!avail_64(dc
)) {
3645 /* For simplicity, we under-decoded the rs2 form. */
3646 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3650 if (unlikely(a
->rs2_or_imm
== 0)) {
3651 gen_exception(dc
, TT_DIV_ZERO
);
3656 src2
= tcg_constant_tl(a
->rs2_or_imm
);
3663 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3664 src2
= cpu_regs
[a
->rs2_or_imm
];
3665 tcg_gen_brcondi_tl(TCG_COND_EQ
, src2
, 0, lab
);
3668 dst
= gen_dest_gpr(dc
, a
->rd
);
3669 src1
= gen_load_gpr(dc
, a
->rs1
);
3671 tcg_gen_divu_tl(dst
, src1
, src2
);
3672 gen_store_gpr(dc
, a
->rd
, dst
);
3673 return advance_pc(dc
);
3676 static bool trans_SDIVX(DisasContext
*dc
, arg_r_r_ri
*a
)
3678 TCGv dst
, src1
, src2
;
3680 if (!avail_64(dc
)) {
3683 /* For simplicity, we under-decoded the rs2 form. */
3684 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3688 if (unlikely(a
->rs2_or_imm
== 0)) {
3689 gen_exception(dc
, TT_DIV_ZERO
);
3693 dst
= gen_dest_gpr(dc
, a
->rd
);
3694 src1
= gen_load_gpr(dc
, a
->rs1
);
3697 if (unlikely(a
->rs2_or_imm
== -1)) {
3698 tcg_gen_neg_tl(dst
, src1
);
3699 gen_store_gpr(dc
, a
->rd
, dst
);
3700 return advance_pc(dc
);
3702 src2
= tcg_constant_tl(a
->rs2_or_imm
);
3710 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3711 src2
= cpu_regs
[a
->rs2_or_imm
];
3712 tcg_gen_brcondi_tl(TCG_COND_EQ
, src2
, 0, lab
);
3715 * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3716 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3718 t1
= tcg_temp_new();
3719 t2
= tcg_temp_new();
3720 tcg_gen_setcondi_tl(TCG_COND_EQ
, t1
, src1
, (target_long
)INT64_MIN
);
3721 tcg_gen_setcondi_tl(TCG_COND_EQ
, t2
, src2
, -1);
3722 tcg_gen_and_tl(t1
, t1
, t2
);
3723 tcg_gen_movcond_tl(TCG_COND_NE
, t1
, t1
, tcg_constant_tl(0),
3724 tcg_constant_tl(1), src2
);
3728 tcg_gen_div_tl(dst
, src1
, src2
);
3729 gen_store_gpr(dc
, a
->rd
, dst
);
3730 return advance_pc(dc
);
3733 static bool gen_edge(DisasContext
*dc
, arg_r_r_r
*a
,
3734 int width
, bool cc
, bool left
)
3736 TCGv dst
, s1
, s2
, lo1
, lo2
;
3737 uint64_t amask
, tabl
, tabr
;
3738 int shift
, imask
, omask
;
3740 dst
= gen_dest_gpr(dc
, a
->rd
);
3741 s1
= gen_load_gpr(dc
, a
->rs1
);
3742 s2
= gen_load_gpr(dc
, a
->rs2
);
3745 gen_op_subcc(cpu_cc_N
, s1
, s2
);
3749 * Theory of operation: there are two tables, left and right (not to
3750 * be confused with the left and right versions of the opcode). These
3751 * are indexed by the low 3 bits of the inputs. To make things "easy",
3752 * these tables are loaded into two constants, TABL and TABR below.
3753 * The operation index = (input & imask) << shift calculates the index
3754 * into the constant, while val = (table >> index) & omask calculates
3755 * the value we're looking for.
3763 tabl
= 0x80c0e0f0f8fcfeffULL
;
3764 tabr
= 0xff7f3f1f0f070301ULL
;
3766 tabl
= 0x0103070f1f3f7fffULL
;
3767 tabr
= 0xfffefcf8f0e0c080ULL
;
3787 tabl
= (2 << 2) | 3;
3788 tabr
= (3 << 2) | 1;
3790 tabl
= (1 << 2) | 3;
3791 tabr
= (3 << 2) | 2;
3798 lo1
= tcg_temp_new();
3799 lo2
= tcg_temp_new();
3800 tcg_gen_andi_tl(lo1
, s1
, imask
);
3801 tcg_gen_andi_tl(lo2
, s2
, imask
);
3802 tcg_gen_shli_tl(lo1
, lo1
, shift
);
3803 tcg_gen_shli_tl(lo2
, lo2
, shift
);
3805 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
3806 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
3807 tcg_gen_andi_tl(lo1
, lo1
, omask
);
3808 tcg_gen_andi_tl(lo2
, lo2
, omask
);
3810 amask
= address_mask_i(dc
, -8);
3811 tcg_gen_andi_tl(s1
, s1
, amask
);
3812 tcg_gen_andi_tl(s2
, s2
, amask
);
3814 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3815 tcg_gen_and_tl(lo2
, lo2
, lo1
);
3816 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
3818 gen_store_gpr(dc
, a
->rd
, dst
);
3819 return advance_pc(dc
);
3822 TRANS(EDGE8cc
, VIS1
, gen_edge
, a
, 8, 1, 0)
3823 TRANS(EDGE8Lcc
, VIS1
, gen_edge
, a
, 8, 1, 1)
3824 TRANS(EDGE16cc
, VIS1
, gen_edge
, a
, 16, 1, 0)
3825 TRANS(EDGE16Lcc
, VIS1
, gen_edge
, a
, 16, 1, 1)
3826 TRANS(EDGE32cc
, VIS1
, gen_edge
, a
, 32, 1, 0)
3827 TRANS(EDGE32Lcc
, VIS1
, gen_edge
, a
, 32, 1, 1)
3829 TRANS(EDGE8N
, VIS2
, gen_edge
, a
, 8, 0, 0)
3830 TRANS(EDGE8LN
, VIS2
, gen_edge
, a
, 8, 0, 1)
3831 TRANS(EDGE16N
, VIS2
, gen_edge
, a
, 16, 0, 0)
3832 TRANS(EDGE16LN
, VIS2
, gen_edge
, a
, 16, 0, 1)
3833 TRANS(EDGE32N
, VIS2
, gen_edge
, a
, 32, 0, 0)
3834 TRANS(EDGE32LN
, VIS2
, gen_edge
, a
, 32, 0, 1)
3836 static bool do_rrr(DisasContext
*dc
, arg_r_r_r
*a
,
3837 void (*func
)(TCGv
, TCGv
, TCGv
))
3839 TCGv dst
= gen_dest_gpr(dc
, a
->rd
);
3840 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3841 TCGv src2
= gen_load_gpr(dc
, a
->rs2
);
3843 func(dst
, src1
, src2
);
3844 gen_store_gpr(dc
, a
->rd
, dst
);
3845 return advance_pc(dc
);
3848 TRANS(ARRAY8
, VIS1
, do_rrr
, a
, gen_helper_array8
)
3849 TRANS(ARRAY16
, VIS1
, do_rrr
, a
, gen_op_array16
)
3850 TRANS(ARRAY32
, VIS1
, do_rrr
, a
, gen_op_array32
)
3852 static void gen_op_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
)
3854 #ifdef TARGET_SPARC64
3855 TCGv tmp
= tcg_temp_new();
3857 tcg_gen_add_tl(tmp
, s1
, s2
);
3858 tcg_gen_andi_tl(dst
, tmp
, -8);
3859 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
3861 g_assert_not_reached();
3865 static void gen_op_alignaddrl(TCGv dst
, TCGv s1
, TCGv s2
)
3867 #ifdef TARGET_SPARC64
3868 TCGv tmp
= tcg_temp_new();
3870 tcg_gen_add_tl(tmp
, s1
, s2
);
3871 tcg_gen_andi_tl(dst
, tmp
, -8);
3872 tcg_gen_neg_tl(tmp
, tmp
);
3873 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
3875 g_assert_not_reached();
3879 TRANS(ALIGNADDR
, VIS1
, do_rrr
, a
, gen_op_alignaddr
)
3880 TRANS(ALIGNADDRL
, VIS1
, do_rrr
, a
, gen_op_alignaddrl
)
3882 static void gen_op_bmask(TCGv dst
, TCGv s1
, TCGv s2
)
3884 #ifdef TARGET_SPARC64
3885 tcg_gen_add_tl(dst
, s1
, s2
);
3886 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, dst
, 32, 32);
3888 g_assert_not_reached();
3892 TRANS(BMASK
, VIS2
, do_rrr
, a
, gen_op_bmask
)
3894 static bool do_shift_r(DisasContext
*dc
, arg_shiftr
*a
, bool l
, bool u
)
3896 TCGv dst
, src1
, src2
;
3898 /* Reject 64-bit shifts for sparc32. */
3899 if (avail_32(dc
) && a
->x
) {
3903 src2
= tcg_temp_new();
3904 tcg_gen_andi_tl(src2
, gen_load_gpr(dc
, a
->rs2
), a
->x
? 63 : 31);
3905 src1
= gen_load_gpr(dc
, a
->rs1
);
3906 dst
= gen_dest_gpr(dc
, a
->rd
);
3909 tcg_gen_shl_tl(dst
, src1
, src2
);
3911 tcg_gen_ext32u_tl(dst
, dst
);
3915 tcg_gen_ext32u_tl(dst
, src1
);
3918 tcg_gen_shr_tl(dst
, src1
, src2
);
3921 tcg_gen_ext32s_tl(dst
, src1
);
3924 tcg_gen_sar_tl(dst
, src1
, src2
);
3926 gen_store_gpr(dc
, a
->rd
, dst
);
3927 return advance_pc(dc
);
3930 TRANS(SLL_r
, ALL
, do_shift_r
, a
, true, true)
3931 TRANS(SRL_r
, ALL
, do_shift_r
, a
, false, true)
3932 TRANS(SRA_r
, ALL
, do_shift_r
, a
, false, false)
3934 static bool do_shift_i(DisasContext
*dc
, arg_shifti
*a
, bool l
, bool u
)
3938 /* Reject 64-bit shifts for sparc32. */
3939 if (avail_32(dc
) && (a
->x
|| a
->i
>= 32)) {
3943 src1
= gen_load_gpr(dc
, a
->rs1
);
3944 dst
= gen_dest_gpr(dc
, a
->rd
);
3946 if (avail_32(dc
) || a
->x
) {
3948 tcg_gen_shli_tl(dst
, src1
, a
->i
);
3950 tcg_gen_shri_tl(dst
, src1
, a
->i
);
3952 tcg_gen_sari_tl(dst
, src1
, a
->i
);
3956 tcg_gen_deposit_z_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3958 tcg_gen_extract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3960 tcg_gen_sextract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3963 gen_store_gpr(dc
, a
->rd
, dst
);
3964 return advance_pc(dc
);
3967 TRANS(SLL_i
, ALL
, do_shift_i
, a
, true, true)
3968 TRANS(SRL_i
, ALL
, do_shift_i
, a
, false, true)
3969 TRANS(SRA_i
, ALL
, do_shift_i
, a
, false, false)
3971 static TCGv
gen_rs2_or_imm(DisasContext
*dc
, bool imm
, int rs2_or_imm
)
3973 /* For simplicity, we under-decoded the rs2 form. */
3974 if (!imm
&& rs2_or_imm
& ~0x1f) {
3977 if (imm
|| rs2_or_imm
== 0) {
3978 return tcg_constant_tl(rs2_or_imm
);
3980 return cpu_regs
[rs2_or_imm
];
3984 static bool do_mov_cond(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, TCGv src2
)
3986 TCGv dst
= gen_load_gpr(dc
, rd
);
3987 TCGv c2
= tcg_constant_tl(cmp
->c2
);
3989 tcg_gen_movcond_tl(cmp
->cond
, dst
, cmp
->c1
, c2
, src2
, dst
);
3990 gen_store_gpr(dc
, rd
, dst
);
3991 return advance_pc(dc
);
3994 static bool trans_MOVcc(DisasContext
*dc
, arg_MOVcc
*a
)
3996 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4002 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4003 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4006 static bool trans_MOVfcc(DisasContext
*dc
, arg_MOVfcc
*a
)
4008 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4014 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4015 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4018 static bool trans_MOVR(DisasContext
*dc
, arg_MOVR
*a
)
4020 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4026 if (!gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
))) {
4029 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4032 static bool do_add_special(DisasContext
*dc
, arg_r_r_ri
*a
,
4033 bool (*func
)(DisasContext
*dc
, int rd
, TCGv src
))
4037 /* For simplicity, we under-decoded the rs2 form. */
4038 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
4043 * Always load the sum into a new temporary.
4044 * This is required to capture the value across a window change,
4045 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4047 sum
= tcg_temp_new();
4048 src1
= gen_load_gpr(dc
, a
->rs1
);
4049 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4050 tcg_gen_addi_tl(sum
, src1
, a
->rs2_or_imm
);
4052 tcg_gen_add_tl(sum
, src1
, cpu_regs
[a
->rs2_or_imm
]);
4054 return func(dc
, a
->rd
, sum
);
4057 static bool do_jmpl(DisasContext
*dc
, int rd
, TCGv src
)
4060 * Preserve pc across advance, so that we can delay
4061 * the writeback to rd until after src is consumed.
4063 target_ulong cur_pc
= dc
->pc
;
4065 gen_check_align(dc
, src
, 3);
4068 tcg_gen_mov_tl(cpu_npc
, src
);
4069 gen_address_mask(dc
, cpu_npc
);
4070 gen_store_gpr(dc
, rd
, tcg_constant_tl(cur_pc
));
4072 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4076 TRANS(JMPL
, ALL
, do_add_special
, a
, do_jmpl
)
4078 static bool do_rett(DisasContext
*dc
, int rd
, TCGv src
)
4080 if (!supervisor(dc
)) {
4081 return raise_priv(dc
);
4084 gen_check_align(dc
, src
, 3);
4087 tcg_gen_mov_tl(cpu_npc
, src
);
4088 gen_helper_rett(tcg_env
);
4090 dc
->npc
= DYNAMIC_PC
;
4094 TRANS(RETT
, 32, do_add_special
, a
, do_rett
)
4096 static bool do_return(DisasContext
*dc
, int rd
, TCGv src
)
4098 gen_check_align(dc
, src
, 3);
4099 gen_helper_restore(tcg_env
);
4102 tcg_gen_mov_tl(cpu_npc
, src
);
4103 gen_address_mask(dc
, cpu_npc
);
4105 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4109 TRANS(RETURN
, 64, do_add_special
, a
, do_return
)
4111 static bool do_save(DisasContext
*dc
, int rd
, TCGv src
)
4113 gen_helper_save(tcg_env
);
4114 gen_store_gpr(dc
, rd
, src
);
4115 return advance_pc(dc
);
4118 TRANS(SAVE
, ALL
, do_add_special
, a
, do_save
)
4120 static bool do_restore(DisasContext
*dc
, int rd
, TCGv src
)
4122 gen_helper_restore(tcg_env
);
4123 gen_store_gpr(dc
, rd
, src
);
4124 return advance_pc(dc
);
4127 TRANS(RESTORE
, ALL
, do_add_special
, a
, do_restore
)
4129 static bool do_done_retry(DisasContext
*dc
, bool done
)
4131 if (!supervisor(dc
)) {
4132 return raise_priv(dc
);
4134 dc
->npc
= DYNAMIC_PC
;
4135 dc
->pc
= DYNAMIC_PC
;
4136 translator_io_start(&dc
->base
);
4138 gen_helper_done(tcg_env
);
4140 gen_helper_retry(tcg_env
);
4145 TRANS(DONE
, 64, do_done_retry
, true)
4146 TRANS(RETRY
, 64, do_done_retry
, false)
4149 * Major opcode 11 -- load and store instructions
4152 static TCGv
gen_ldst_addr(DisasContext
*dc
, int rs1
, bool imm
, int rs2_or_imm
)
4154 TCGv addr
, tmp
= NULL
;
4156 /* For simplicity, we under-decoded the rs2 form. */
4157 if (!imm
&& rs2_or_imm
& ~0x1f) {
4161 addr
= gen_load_gpr(dc
, rs1
);
4163 tmp
= tcg_temp_new();
4165 tcg_gen_addi_tl(tmp
, addr
, rs2_or_imm
);
4167 tcg_gen_add_tl(tmp
, addr
, cpu_regs
[rs2_or_imm
]);
4173 tmp
= tcg_temp_new();
4175 tcg_gen_ext32u_tl(tmp
, addr
);
4181 static bool do_ld_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4183 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4189 da
= resolve_asi(dc
, a
->asi
, mop
);
4191 reg
= gen_dest_gpr(dc
, a
->rd
);
4192 gen_ld_asi(dc
, &da
, reg
, addr
);
4193 gen_store_gpr(dc
, a
->rd
, reg
);
4194 return advance_pc(dc
);
4197 TRANS(LDUW
, ALL
, do_ld_gpr
, a
, MO_TEUL
)
4198 TRANS(LDUB
, ALL
, do_ld_gpr
, a
, MO_UB
)
4199 TRANS(LDUH
, ALL
, do_ld_gpr
, a
, MO_TEUW
)
4200 TRANS(LDSB
, ALL
, do_ld_gpr
, a
, MO_SB
)
4201 TRANS(LDSH
, ALL
, do_ld_gpr
, a
, MO_TESW
)
4202 TRANS(LDSW
, 64, do_ld_gpr
, a
, MO_TESL
)
4203 TRANS(LDX
, 64, do_ld_gpr
, a
, MO_TEUQ
)
4205 static bool do_st_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4207 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4213 da
= resolve_asi(dc
, a
->asi
, mop
);
4215 reg
= gen_load_gpr(dc
, a
->rd
);
4216 gen_st_asi(dc
, &da
, reg
, addr
);
4217 return advance_pc(dc
);
4220 TRANS(STW
, ALL
, do_st_gpr
, a
, MO_TEUL
)
4221 TRANS(STB
, ALL
, do_st_gpr
, a
, MO_UB
)
4222 TRANS(STH
, ALL
, do_st_gpr
, a
, MO_TEUW
)
4223 TRANS(STX
, 64, do_st_gpr
, a
, MO_TEUQ
)
4225 static bool trans_LDD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4233 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4237 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4238 gen_ldda_asi(dc
, &da
, addr
, a
->rd
);
4239 return advance_pc(dc
);
4242 static bool trans_STD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4250 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4254 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4255 gen_stda_asi(dc
, &da
, addr
, a
->rd
);
4256 return advance_pc(dc
);
4259 static bool trans_LDSTUB(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4264 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4268 da
= resolve_asi(dc
, a
->asi
, MO_UB
);
4270 reg
= gen_dest_gpr(dc
, a
->rd
);
4271 gen_ldstub_asi(dc
, &da
, reg
, addr
);
4272 gen_store_gpr(dc
, a
->rd
, reg
);
4273 return advance_pc(dc
);
4276 static bool trans_SWAP(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4278 TCGv addr
, dst
, src
;
4281 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4285 da
= resolve_asi(dc
, a
->asi
, MO_TEUL
);
4287 dst
= gen_dest_gpr(dc
, a
->rd
);
4288 src
= gen_load_gpr(dc
, a
->rd
);
4289 gen_swap_asi(dc
, &da
, dst
, src
, addr
);
4290 gen_store_gpr(dc
, a
->rd
, dst
);
4291 return advance_pc(dc
);
4294 static bool do_casa(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4299 addr
= gen_ldst_addr(dc
, a
->rs1
, true, 0);
4303 da
= resolve_asi(dc
, a
->asi
, mop
);
4305 o
= gen_dest_gpr(dc
, a
->rd
);
4306 n
= gen_load_gpr(dc
, a
->rd
);
4307 c
= gen_load_gpr(dc
, a
->rs2_or_imm
);
4308 gen_cas_asi(dc
, &da
, o
, n
, c
, addr
);
4309 gen_store_gpr(dc
, a
->rd
, o
);
4310 return advance_pc(dc
);
4313 TRANS(CASA
, CASA
, do_casa
, a
, MO_TEUL
)
4314 TRANS(CASXA
, 64, do_casa
, a
, MO_TEUQ
)
4316 static bool do_ld_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4318 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4324 if (gen_trap_ifnofpu(dc
)) {
4327 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4330 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4331 gen_ldf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4332 gen_update_fprs_dirty(dc
, a
->rd
);
4333 return advance_pc(dc
);
4336 TRANS(LDF
, ALL
, do_ld_fpr
, a
, MO_32
)
4337 TRANS(LDDF
, ALL
, do_ld_fpr
, a
, MO_64
)
4338 TRANS(LDQF
, ALL
, do_ld_fpr
, a
, MO_128
)
4340 TRANS(LDFA
, 64, do_ld_fpr
, a
, MO_32
)
4341 TRANS(LDDFA
, 64, do_ld_fpr
, a
, MO_64
)
4342 TRANS(LDQFA
, 64, do_ld_fpr
, a
, MO_128
)
4344 static bool do_st_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4346 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4352 if (gen_trap_ifnofpu(dc
)) {
4355 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4358 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4359 gen_stf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4360 return advance_pc(dc
);
4363 TRANS(STF
, ALL
, do_st_fpr
, a
, MO_32
)
4364 TRANS(STDF
, ALL
, do_st_fpr
, a
, MO_64
)
4365 TRANS(STQF
, ALL
, do_st_fpr
, a
, MO_128
)
4367 TRANS(STFA
, 64, do_st_fpr
, a
, MO_32
)
4368 TRANS(STDFA
, 64, do_st_fpr
, a
, MO_64
)
4369 TRANS(STQFA
, 64, do_st_fpr
, a
, MO_128
)
4371 static bool trans_STDFQ(DisasContext
*dc
, arg_STDFQ
*a
)
4373 if (!avail_32(dc
)) {
4376 if (!supervisor(dc
)) {
4377 return raise_priv(dc
);
4379 if (gen_trap_ifnofpu(dc
)) {
4382 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
4386 static bool do_ldfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
,
4387 target_ulong new_mask
, target_ulong old_mask
)
4389 TCGv tmp
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4393 if (gen_trap_ifnofpu(dc
)) {
4396 tmp
= tcg_temp_new();
4397 tcg_gen_qemu_ld_tl(tmp
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4398 tcg_gen_andi_tl(tmp
, tmp
, new_mask
);
4399 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, old_mask
);
4400 tcg_gen_or_tl(cpu_fsr
, cpu_fsr
, tmp
);
4401 gen_helper_set_fsr(tcg_env
, cpu_fsr
);
4402 return advance_pc(dc
);
4405 TRANS(LDFSR
, ALL
, do_ldfsr
, a
, MO_TEUL
, FSR_LDFSR_MASK
, FSR_LDFSR_OLDMASK
)
4406 TRANS(LDXFSR
, 64, do_ldfsr
, a
, MO_TEUQ
, FSR_LDXFSR_MASK
, FSR_LDXFSR_OLDMASK
)
4408 static bool do_stfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
)
4410 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4414 if (gen_trap_ifnofpu(dc
)) {
4417 tcg_gen_qemu_st_tl(cpu_fsr
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4418 return advance_pc(dc
);
4421 TRANS(STFSR
, ALL
, do_stfsr
, a
, MO_TEUL
)
4422 TRANS(STXFSR
, 64, do_stfsr
, a
, MO_TEUQ
)
4424 static bool do_fc(DisasContext
*dc
, int rd
, bool c
)
4428 if (gen_trap_ifnofpu(dc
)) {
4433 mask
= MAKE_64BIT_MASK(0, 32);
4435 mask
= MAKE_64BIT_MASK(32, 32);
4438 tcg_gen_ori_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], mask
);
4440 tcg_gen_andi_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], ~mask
);
4442 gen_update_fprs_dirty(dc
, rd
);
4443 return advance_pc(dc
);
4446 TRANS(FZEROs
, VIS1
, do_fc
, a
->rd
, 0)
4447 TRANS(FONEs
, VIS1
, do_fc
, a
->rd
, 1)
4449 static bool do_dc(DisasContext
*dc
, int rd
, int64_t c
)
4451 if (gen_trap_ifnofpu(dc
)) {
4455 tcg_gen_movi_i64(cpu_fpr
[rd
/ 2], c
);
4456 gen_update_fprs_dirty(dc
, rd
);
4457 return advance_pc(dc
);
4460 TRANS(FZEROd
, VIS1
, do_dc
, a
->rd
, 0)
4461 TRANS(FONEd
, VIS1
, do_dc
, a
->rd
, -1)
4463 static bool do_ff(DisasContext
*dc
, arg_r_r
*a
,
4464 void (*func
)(TCGv_i32
, TCGv_i32
))
4468 if (gen_trap_ifnofpu(dc
)) {
4472 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4474 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4475 return advance_pc(dc
);
4478 TRANS(FMOVs
, ALL
, do_ff
, a
, gen_op_fmovs
)
4479 TRANS(FNEGs
, ALL
, do_ff
, a
, gen_op_fnegs
)
4480 TRANS(FABSs
, ALL
, do_ff
, a
, gen_op_fabss
)
4481 TRANS(FSRCs
, VIS1
, do_ff
, a
, tcg_gen_mov_i32
)
4482 TRANS(FNOTs
, VIS1
, do_ff
, a
, tcg_gen_not_i32
)
4484 static bool do_fd(DisasContext
*dc
, arg_r_r
*a
,
4485 void (*func
)(TCGv_i32
, TCGv_i64
))
4490 if (gen_trap_ifnofpu(dc
)) {
4494 dst
= gen_dest_fpr_F(dc
);
4495 src
= gen_load_fpr_D(dc
, a
->rs
);
4497 gen_store_fpr_F(dc
, a
->rd
, dst
);
4498 return advance_pc(dc
);
4501 TRANS(FPACK16
, VIS1
, do_fd
, a
, gen_op_fpack16
)
4502 TRANS(FPACKFIX
, VIS1
, do_fd
, a
, gen_op_fpackfix
)
4504 static bool do_env_ff(DisasContext
*dc
, arg_r_r
*a
,
4505 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
4509 if (gen_trap_ifnofpu(dc
)) {
4513 gen_op_clear_ieee_excp_and_FTT();
4514 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4515 func(tmp
, tcg_env
, tmp
);
4516 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4517 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4518 return advance_pc(dc
);
4521 TRANS(FSQRTs
, ALL
, do_env_ff
, a
, gen_helper_fsqrts
)
4522 TRANS(FiTOs
, ALL
, do_env_ff
, a
, gen_helper_fitos
)
4523 TRANS(FsTOi
, ALL
, do_env_ff
, a
, gen_helper_fstoi
)
4525 static bool do_env_fd(DisasContext
*dc
, arg_r_r
*a
,
4526 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
4531 if (gen_trap_ifnofpu(dc
)) {
4535 gen_op_clear_ieee_excp_and_FTT();
4536 dst
= gen_dest_fpr_F(dc
);
4537 src
= gen_load_fpr_D(dc
, a
->rs
);
4538 func(dst
, tcg_env
, src
);
4539 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4540 gen_store_fpr_F(dc
, a
->rd
, dst
);
4541 return advance_pc(dc
);
4544 TRANS(FdTOs
, ALL
, do_env_fd
, a
, gen_helper_fdtos
)
4545 TRANS(FdTOi
, ALL
, do_env_fd
, a
, gen_helper_fdtoi
)
4546 TRANS(FxTOs
, 64, do_env_fd
, a
, gen_helper_fxtos
)
4548 static bool do_dd(DisasContext
*dc
, arg_r_r
*a
,
4549 void (*func
)(TCGv_i64
, TCGv_i64
))
4553 if (gen_trap_ifnofpu(dc
)) {
4557 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4558 src
= gen_load_fpr_D(dc
, a
->rs
);
4560 gen_store_fpr_D(dc
, a
->rd
, dst
);
4561 return advance_pc(dc
);
4564 TRANS(FMOVd
, 64, do_dd
, a
, gen_op_fmovd
)
4565 TRANS(FNEGd
, 64, do_dd
, a
, gen_op_fnegd
)
4566 TRANS(FABSd
, 64, do_dd
, a
, gen_op_fabsd
)
4567 TRANS(FSRCd
, VIS1
, do_dd
, a
, tcg_gen_mov_i64
)
4568 TRANS(FNOTd
, VIS1
, do_dd
, a
, tcg_gen_not_i64
)
4570 static bool do_env_dd(DisasContext
*dc
, arg_r_r
*a
,
4571 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
4575 if (gen_trap_ifnofpu(dc
)) {
4579 gen_op_clear_ieee_excp_and_FTT();
4580 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4581 src
= gen_load_fpr_D(dc
, a
->rs
);
4582 func(dst
, tcg_env
, src
);
4583 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4584 gen_store_fpr_D(dc
, a
->rd
, dst
);
4585 return advance_pc(dc
);
4588 TRANS(FSQRTd
, ALL
, do_env_dd
, a
, gen_helper_fsqrtd
)
4589 TRANS(FxTOd
, 64, do_env_dd
, a
, gen_helper_fxtod
)
4590 TRANS(FdTOx
, 64, do_env_dd
, a
, gen_helper_fdtox
)
4592 static bool do_env_df(DisasContext
*dc
, arg_r_r
*a
,
4593 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
4598 if (gen_trap_ifnofpu(dc
)) {
4602 gen_op_clear_ieee_excp_and_FTT();
4603 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4604 src
= gen_load_fpr_F(dc
, a
->rs
);
4605 func(dst
, tcg_env
, src
);
4606 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4607 gen_store_fpr_D(dc
, a
->rd
, dst
);
4608 return advance_pc(dc
);
4611 TRANS(FiTOd
, ALL
, do_env_df
, a
, gen_helper_fitod
)
4612 TRANS(FsTOd
, ALL
, do_env_df
, a
, gen_helper_fstod
)
4613 TRANS(FsTOx
, 64, do_env_df
, a
, gen_helper_fstox
)
4615 static bool trans_FMOVq(DisasContext
*dc
, arg_FMOVq
*a
)
4619 if (!avail_64(dc
)) {
4622 if (gen_trap_ifnofpu(dc
)) {
4625 if (gen_trap_float128(dc
)) {
4629 gen_op_clear_ieee_excp_and_FTT();
4632 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
4633 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
4634 gen_update_fprs_dirty(dc
, rd
);
4635 return advance_pc(dc
);
4638 static bool do_qq(DisasContext
*dc
, arg_r_r
*a
,
4639 void (*func
)(TCGv_env
))
4641 if (gen_trap_ifnofpu(dc
)) {
4644 if (gen_trap_float128(dc
)) {
4648 gen_op_clear_ieee_excp_and_FTT();
4649 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4651 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4652 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4653 return advance_pc(dc
);
4656 TRANS(FNEGq
, 64, do_qq
, a
, gen_helper_fnegq
)
4657 TRANS(FABSq
, 64, do_qq
, a
, gen_helper_fabsq
)
4659 static bool do_env_qq(DisasContext
*dc
, arg_r_r
*a
,
4660 void (*func
)(TCGv_env
))
4662 if (gen_trap_ifnofpu(dc
)) {
4665 if (gen_trap_float128(dc
)) {
4669 gen_op_clear_ieee_excp_and_FTT();
4670 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4672 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4673 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4674 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4675 return advance_pc(dc
);
4678 TRANS(FSQRTq
, ALL
, do_env_qq
, a
, gen_helper_fsqrtq
)
4680 static bool do_env_fq(DisasContext
*dc
, arg_r_r
*a
,
4681 void (*func
)(TCGv_i32
, TCGv_env
))
4685 if (gen_trap_ifnofpu(dc
)) {
4688 if (gen_trap_float128(dc
)) {
4692 gen_op_clear_ieee_excp_and_FTT();
4693 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4694 dst
= gen_dest_fpr_F(dc
);
4696 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4697 gen_store_fpr_F(dc
, a
->rd
, dst
);
4698 return advance_pc(dc
);
4701 TRANS(FqTOs
, ALL
, do_env_fq
, a
, gen_helper_fqtos
)
4702 TRANS(FqTOi
, ALL
, do_env_fq
, a
, gen_helper_fqtoi
)
4704 static bool do_env_dq(DisasContext
*dc
, arg_r_r
*a
,
4705 void (*func
)(TCGv_i64
, TCGv_env
))
4709 if (gen_trap_ifnofpu(dc
)) {
4712 if (gen_trap_float128(dc
)) {
4716 gen_op_clear_ieee_excp_and_FTT();
4717 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4718 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4720 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4721 gen_store_fpr_D(dc
, a
->rd
, dst
);
4722 return advance_pc(dc
);
4725 TRANS(FqTOd
, ALL
, do_env_dq
, a
, gen_helper_fqtod
)
4726 TRANS(FqTOx
, 64, do_env_dq
, a
, gen_helper_fqtox
)
4728 static bool do_env_qf(DisasContext
*dc
, arg_r_r
*a
,
4729 void (*func
)(TCGv_env
, TCGv_i32
))
4733 if (gen_trap_ifnofpu(dc
)) {
4736 if (gen_trap_float128(dc
)) {
4740 gen_op_clear_ieee_excp_and_FTT();
4741 src
= gen_load_fpr_F(dc
, a
->rs
);
4743 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4744 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4745 return advance_pc(dc
);
4748 TRANS(FiTOq
, ALL
, do_env_qf
, a
, gen_helper_fitoq
)
4749 TRANS(FsTOq
, ALL
, do_env_qf
, a
, gen_helper_fstoq
)
4751 static bool do_env_qd(DisasContext
*dc
, arg_r_r
*a
,
4752 void (*func
)(TCGv_env
, TCGv_i64
))
4756 if (gen_trap_ifnofpu(dc
)) {
4759 if (gen_trap_float128(dc
)) {
4763 gen_op_clear_ieee_excp_and_FTT();
4764 src
= gen_load_fpr_D(dc
, a
->rs
);
4766 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4767 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4768 return advance_pc(dc
);
4771 TRANS(FdTOq
, ALL
, do_env_qd
, a
, gen_helper_fdtoq
)
4772 TRANS(FxTOq
, 64, do_env_qd
, a
, gen_helper_fxtoq
)
4774 static bool do_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4775 void (*func
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
4777 TCGv_i32 src1
, src2
;
4779 if (gen_trap_ifnofpu(dc
)) {
4783 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4784 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4785 func(src1
, src1
, src2
);
4786 gen_store_fpr_F(dc
, a
->rd
, src1
);
4787 return advance_pc(dc
);
4790 TRANS(FPADD16s
, VIS1
, do_fff
, a
, tcg_gen_vec_add16_i32
)
4791 TRANS(FPADD32s
, VIS1
, do_fff
, a
, tcg_gen_add_i32
)
4792 TRANS(FPSUB16s
, VIS1
, do_fff
, a
, tcg_gen_vec_sub16_i32
)
4793 TRANS(FPSUB32s
, VIS1
, do_fff
, a
, tcg_gen_sub_i32
)
4794 TRANS(FNORs
, VIS1
, do_fff
, a
, tcg_gen_nor_i32
)
4795 TRANS(FANDNOTs
, VIS1
, do_fff
, a
, tcg_gen_andc_i32
)
4796 TRANS(FXORs
, VIS1
, do_fff
, a
, tcg_gen_xor_i32
)
4797 TRANS(FNANDs
, VIS1
, do_fff
, a
, tcg_gen_nand_i32
)
4798 TRANS(FANDs
, VIS1
, do_fff
, a
, tcg_gen_and_i32
)
4799 TRANS(FXNORs
, VIS1
, do_fff
, a
, tcg_gen_eqv_i32
)
4800 TRANS(FORNOTs
, VIS1
, do_fff
, a
, tcg_gen_orc_i32
)
4801 TRANS(FORs
, VIS1
, do_fff
, a
, tcg_gen_or_i32
)
4803 static bool do_env_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4804 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
4806 TCGv_i32 src1
, src2
;
4808 if (gen_trap_ifnofpu(dc
)) {
4812 gen_op_clear_ieee_excp_and_FTT();
4813 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4814 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4815 func(src1
, tcg_env
, src1
, src2
);
4816 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4817 gen_store_fpr_F(dc
, a
->rd
, src1
);
4818 return advance_pc(dc
);
4821 TRANS(FADDs
, ALL
, do_env_fff
, a
, gen_helper_fadds
)
4822 TRANS(FSUBs
, ALL
, do_env_fff
, a
, gen_helper_fsubs
)
4823 TRANS(FMULs
, ALL
, do_env_fff
, a
, gen_helper_fmuls
)
4824 TRANS(FDIVs
, ALL
, do_env_fff
, a
, gen_helper_fdivs
)
4826 static bool do_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4827 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
4829 TCGv_i64 dst
, src1
, src2
;
4831 if (gen_trap_ifnofpu(dc
)) {
4835 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4836 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4837 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4838 func(dst
, src1
, src2
);
4839 gen_store_fpr_D(dc
, a
->rd
, dst
);
4840 return advance_pc(dc
);
4843 TRANS(FMUL8x16
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16
)
4844 TRANS(FMUL8x16AU
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16au
)
4845 TRANS(FMUL8x16AL
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16al
)
4846 TRANS(FMUL8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8sux16
)
4847 TRANS(FMUL8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8ulx16
)
4848 TRANS(FMULD8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8sux16
)
4849 TRANS(FMULD8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8ulx16
)
4850 TRANS(FPMERGE
, VIS1
, do_ddd
, a
, gen_helper_fpmerge
)
4851 TRANS(FEXPAND
, VIS1
, do_ddd
, a
, gen_helper_fexpand
)
4853 TRANS(FPADD16
, VIS1
, do_ddd
, a
, tcg_gen_vec_add16_i64
)
4854 TRANS(FPADD32
, VIS1
, do_ddd
, a
, tcg_gen_vec_add32_i64
)
4855 TRANS(FPSUB16
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub16_i64
)
4856 TRANS(FPSUB32
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub32_i64
)
4857 TRANS(FNORd
, VIS1
, do_ddd
, a
, tcg_gen_nor_i64
)
4858 TRANS(FANDNOTd
, VIS1
, do_ddd
, a
, tcg_gen_andc_i64
)
4859 TRANS(FXORd
, VIS1
, do_ddd
, a
, tcg_gen_xor_i64
)
4860 TRANS(FNANDd
, VIS1
, do_ddd
, a
, tcg_gen_nand_i64
)
4861 TRANS(FANDd
, VIS1
, do_ddd
, a
, tcg_gen_and_i64
)
4862 TRANS(FXNORd
, VIS1
, do_ddd
, a
, tcg_gen_eqv_i64
)
4863 TRANS(FORNOTd
, VIS1
, do_ddd
, a
, tcg_gen_orc_i64
)
4864 TRANS(FORd
, VIS1
, do_ddd
, a
, tcg_gen_or_i64
)
4866 TRANS(FPACK32
, VIS1
, do_ddd
, a
, gen_op_fpack32
)
4867 TRANS(FALIGNDATAg
, VIS1
, do_ddd
, a
, gen_op_faligndata
)
4868 TRANS(BSHUFFLE
, VIS2
, do_ddd
, a
, gen_op_bshuffle
)
4870 static bool do_rdd(DisasContext
*dc
, arg_r_r_r
*a
,
4871 void (*func
)(TCGv
, TCGv_i64
, TCGv_i64
))
4873 TCGv_i64 src1
, src2
;
4876 if (gen_trap_ifnofpu(dc
)) {
4880 dst
= gen_dest_gpr(dc
, a
->rd
);
4881 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4882 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4883 func(dst
, src1
, src2
);
4884 gen_store_gpr(dc
, a
->rd
, dst
);
4885 return advance_pc(dc
);
4888 TRANS(FPCMPLE16
, VIS1
, do_rdd
, a
, gen_helper_fcmple16
)
4889 TRANS(FPCMPNE16
, VIS1
, do_rdd
, a
, gen_helper_fcmpne16
)
4890 TRANS(FPCMPGT16
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt16
)
4891 TRANS(FPCMPEQ16
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq16
)
4893 TRANS(FPCMPLE32
, VIS1
, do_rdd
, a
, gen_helper_fcmple32
)
4894 TRANS(FPCMPNE32
, VIS1
, do_rdd
, a
, gen_helper_fcmpne32
)
4895 TRANS(FPCMPGT32
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt32
)
4896 TRANS(FPCMPEQ32
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq32
)
4898 static bool do_env_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4899 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
4901 TCGv_i64 dst
, src1
, src2
;
4903 if (gen_trap_ifnofpu(dc
)) {
4907 gen_op_clear_ieee_excp_and_FTT();
4908 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4909 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4910 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4911 func(dst
, tcg_env
, src1
, src2
);
4912 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4913 gen_store_fpr_D(dc
, a
->rd
, dst
);
4914 return advance_pc(dc
);
4917 TRANS(FADDd
, ALL
, do_env_ddd
, a
, gen_helper_faddd
)
4918 TRANS(FSUBd
, ALL
, do_env_ddd
, a
, gen_helper_fsubd
)
4919 TRANS(FMULd
, ALL
, do_env_ddd
, a
, gen_helper_fmuld
)
4920 TRANS(FDIVd
, ALL
, do_env_ddd
, a
, gen_helper_fdivd
)
4922 static bool trans_FsMULd(DisasContext
*dc
, arg_r_r_r
*a
)
4925 TCGv_i32 src1
, src2
;
4927 if (gen_trap_ifnofpu(dc
)) {
4930 if (!(dc
->def
->features
& CPU_FEATURE_FSMULD
)) {
4931 return raise_unimpfpop(dc
);
4934 gen_op_clear_ieee_excp_and_FTT();
4935 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4936 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4937 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4938 gen_helper_fsmuld(dst
, tcg_env
, src1
, src2
);
4939 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4940 gen_store_fpr_D(dc
, a
->rd
, dst
);
4941 return advance_pc(dc
);
4944 static bool do_dddd(DisasContext
*dc
, arg_r_r_r
*a
,
4945 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
4947 TCGv_i64 dst
, src0
, src1
, src2
;
4949 if (gen_trap_ifnofpu(dc
)) {
4953 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4954 src0
= gen_load_fpr_D(dc
, a
->rd
);
4955 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4956 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4957 func(dst
, src0
, src1
, src2
);
4958 gen_store_fpr_D(dc
, a
->rd
, dst
);
4959 return advance_pc(dc
);
4962 TRANS(PDIST
, VIS1
, do_dddd
, a
, gen_helper_pdist
)
4964 static bool do_env_qqq(DisasContext
*dc
, arg_r_r_r
*a
,
4965 void (*func
)(TCGv_env
))
4967 if (gen_trap_ifnofpu(dc
)) {
4970 if (gen_trap_float128(dc
)) {
4974 gen_op_clear_ieee_excp_and_FTT();
4975 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
4976 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
4978 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4979 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4980 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4981 return advance_pc(dc
);
4984 TRANS(FADDq
, ALL
, do_env_qqq
, a
, gen_helper_faddq
)
4985 TRANS(FSUBq
, ALL
, do_env_qqq
, a
, gen_helper_fsubq
)
4986 TRANS(FMULq
, ALL
, do_env_qqq
, a
, gen_helper_fmulq
)
4987 TRANS(FDIVq
, ALL
, do_env_qqq
, a
, gen_helper_fdivq
)
4989 static bool trans_FdMULq(DisasContext
*dc
, arg_r_r_r
*a
)
4991 TCGv_i64 src1
, src2
;
4993 if (gen_trap_ifnofpu(dc
)) {
4996 if (gen_trap_float128(dc
)) {
5000 gen_op_clear_ieee_excp_and_FTT();
5001 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5002 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5003 gen_helper_fdmulq(tcg_env
, src1
, src2
);
5004 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
5005 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
5006 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
5007 return advance_pc(dc
);
5010 static bool do_fmovr(DisasContext
*dc
, arg_FMOVRs
*a
, bool is_128
,
5011 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5015 if (!gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
))) {
5018 if (gen_trap_ifnofpu(dc
)) {
5021 if (is_128
&& gen_trap_float128(dc
)) {
5025 gen_op_clear_ieee_excp_and_FTT();
5026 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5027 return advance_pc(dc
);
5030 TRANS(FMOVRs
, 64, do_fmovr
, a
, false, gen_fmovs
)
5031 TRANS(FMOVRd
, 64, do_fmovr
, a
, false, gen_fmovd
)
5032 TRANS(FMOVRq
, 64, do_fmovr
, a
, true, gen_fmovq
)
5034 static bool do_fmovcc(DisasContext
*dc
, arg_FMOVscc
*a
, bool is_128
,
5035 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5039 if (gen_trap_ifnofpu(dc
)) {
5042 if (is_128
&& gen_trap_float128(dc
)) {
5046 gen_op_clear_ieee_excp_and_FTT();
5047 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
5048 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5049 return advance_pc(dc
);
5052 TRANS(FMOVscc
, 64, do_fmovcc
, a
, false, gen_fmovs
)
5053 TRANS(FMOVdcc
, 64, do_fmovcc
, a
, false, gen_fmovd
)
5054 TRANS(FMOVqcc
, 64, do_fmovcc
, a
, true, gen_fmovq
)
5056 static bool do_fmovfcc(DisasContext
*dc
, arg_FMOVsfcc
*a
, bool is_128
,
5057 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5061 if (gen_trap_ifnofpu(dc
)) {
5064 if (is_128
&& gen_trap_float128(dc
)) {
5068 gen_op_clear_ieee_excp_and_FTT();
5069 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
5070 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5071 return advance_pc(dc
);
5074 TRANS(FMOVsfcc
, 64, do_fmovfcc
, a
, false, gen_fmovs
)
5075 TRANS(FMOVdfcc
, 64, do_fmovfcc
, a
, false, gen_fmovd
)
5076 TRANS(FMOVqfcc
, 64, do_fmovfcc
, a
, true, gen_fmovq
)
5078 static bool do_fcmps(DisasContext
*dc
, arg_FCMPs
*a
, bool e
)
5080 TCGv_i32 src1
, src2
;
5082 if (avail_32(dc
) && a
->cc
!= 0) {
5085 if (gen_trap_ifnofpu(dc
)) {
5089 gen_op_clear_ieee_excp_and_FTT();
5090 src1
= gen_load_fpr_F(dc
, a
->rs1
);
5091 src2
= gen_load_fpr_F(dc
, a
->rs2
);
5093 gen_op_fcmpes(a
->cc
, src1
, src2
);
5095 gen_op_fcmps(a
->cc
, src1
, src2
);
5097 return advance_pc(dc
);
5100 TRANS(FCMPs
, ALL
, do_fcmps
, a
, false)
5101 TRANS(FCMPEs
, ALL
, do_fcmps
, a
, true)
5103 static bool do_fcmpd(DisasContext
*dc
, arg_FCMPd
*a
, bool e
)
5105 TCGv_i64 src1
, src2
;
5107 if (avail_32(dc
) && a
->cc
!= 0) {
5110 if (gen_trap_ifnofpu(dc
)) {
5114 gen_op_clear_ieee_excp_and_FTT();
5115 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5116 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5118 gen_op_fcmped(a
->cc
, src1
, src2
);
5120 gen_op_fcmpd(a
->cc
, src1
, src2
);
5122 return advance_pc(dc
);
5125 TRANS(FCMPd
, ALL
, do_fcmpd
, a
, false)
5126 TRANS(FCMPEd
, ALL
, do_fcmpd
, a
, true)
5128 static bool do_fcmpq(DisasContext
*dc
, arg_FCMPq
*a
, bool e
)
5130 if (avail_32(dc
) && a
->cc
!= 0) {
5133 if (gen_trap_ifnofpu(dc
)) {
5136 if (gen_trap_float128(dc
)) {
5140 gen_op_clear_ieee_excp_and_FTT();
5141 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
5142 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
5144 gen_op_fcmpeq(a
->cc
);
5146 gen_op_fcmpq(a
->cc
);
5148 return advance_pc(dc
);
5151 TRANS(FCMPq
, ALL
, do_fcmpq
, a
, false)
5152 TRANS(FCMPEq
, ALL
, do_fcmpq
, a
, true)
5154 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5156 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5157 CPUSPARCState
*env
= cpu_env(cs
);
5160 dc
->pc
= dc
->base
.pc_first
;
5161 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
5162 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5163 dc
->def
= &env
->def
;
5164 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5165 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5166 #ifndef CONFIG_USER_ONLY
5167 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5169 #ifdef TARGET_SPARC64
5171 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5172 #ifndef CONFIG_USER_ONLY
5173 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5177 * if we reach a page boundary, we stop generation so that the
5178 * PC of a TT_TFAULT exception is always in the right page
5180 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5181 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
5184 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
5188 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
5190 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5191 target_ulong npc
= dc
->npc
;
5196 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
5197 npc
= dc
->jump_pc
[0] | JUMP_PC
;
5200 case DYNAMIC_PC_LOOKUP
:
5204 g_assert_not_reached();
5207 tcg_gen_insn_start(dc
->pc
, npc
);
5210 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
5212 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5213 CPUSPARCState
*env
= cpu_env(cs
);
5216 insn
= translator_ldl(env
, &dc
->base
, dc
->pc
);
5217 dc
->base
.pc_next
+= 4;
5219 if (!decode(dc
, insn
)) {
5220 gen_exception(dc
, TT_ILL_INSN
);
5223 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
5226 if (dc
->pc
!= dc
->base
.pc_next
) {
5227 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
5231 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
5233 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5234 DisasDelayException
*e
, *e_next
;
5239 switch (dc
->base
.is_jmp
) {
5241 case DISAS_TOO_MANY
:
5242 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
5243 /* static PC and NPC: we can use direct chaining */
5244 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5251 case DYNAMIC_PC_LOOKUP
:
5257 g_assert_not_reached();
5260 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5266 gen_generic_branch(dc
);
5271 case DYNAMIC_PC_LOOKUP
:
5274 g_assert_not_reached();
5277 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
5280 tcg_gen_lookup_and_goto_ptr();
5282 tcg_gen_exit_tb(NULL
, 0);
5286 case DISAS_NORETURN
:
5292 tcg_gen_exit_tb(NULL
, 0);
5296 g_assert_not_reached();
5299 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5300 gen_set_label(e
->lab
);
5302 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
5303 if (e
->npc
% 4 == 0) {
5304 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5306 gen_helper_raise_exception(tcg_env
, e
->excp
);
5313 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5314 CPUState
*cpu
, FILE *logfile
)
5316 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5317 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
5320 static const TranslatorOps sparc_tr_ops
= {
5321 .init_disas_context
= sparc_tr_init_disas_context
,
5322 .tb_start
= sparc_tr_tb_start
,
5323 .insn_start
= sparc_tr_insn_start
,
5324 .translate_insn
= sparc_tr_translate_insn
,
5325 .tb_stop
= sparc_tr_tb_stop
,
5326 .disas_log
= sparc_tr_disas_log
,
5329 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5330 target_ulong pc
, void *host_pc
)
5332 DisasContext dc
= {};
5334 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
5337 void sparc_tcg_init(void)
5339 static const char gregnames
[32][4] = {
5340 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5341 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5342 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5343 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5345 static const char fregnames
[32][4] = {
5346 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5347 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5348 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5349 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5352 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5353 #ifdef TARGET_SPARC64
5354 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5355 { &cpu_xcc_Z
, offsetof(CPUSPARCState
, xcc_Z
), "xcc_Z" },
5356 { &cpu_xcc_C
, offsetof(CPUSPARCState
, xcc_C
), "xcc_C" },
5358 { &cpu_cc_N
, offsetof(CPUSPARCState
, cc_N
), "cc_N" },
5359 { &cpu_cc_V
, offsetof(CPUSPARCState
, cc_V
), "cc_V" },
5360 { &cpu_icc_Z
, offsetof(CPUSPARCState
, icc_Z
), "icc_Z" },
5361 { &cpu_icc_C
, offsetof(CPUSPARCState
, icc_C
), "icc_C" },
5362 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5363 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5364 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5365 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5366 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5367 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5372 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5373 offsetof(CPUSPARCState
, regwptr
),
5376 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5377 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
5381 for (i
= 1; i
< 8; ++i
) {
5382 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5383 offsetof(CPUSPARCState
, gregs
[i
]),
5387 for (i
= 8; i
< 32; ++i
) {
5388 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5389 (i
- 8) * sizeof(target_ulong
),
5393 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5394 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
5395 offsetof(CPUSPARCState
, fpr
[i
]),
5399 #ifdef TARGET_SPARC64
5400 cpu_fprs
= tcg_global_mem_new_i32(tcg_env
,
5401 offsetof(CPUSPARCState
, fprs
), "fprs");
5405 void sparc_restore_state_to_opc(CPUState
*cs
,
5406 const TranslationBlock
*tb
,
5407 const uint64_t *data
)
5409 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5410 CPUSPARCState
*env
= &cpu
->env
;
5411 target_ulong pc
= data
[0];
5412 target_ulong npc
= data
[1];
5415 if (npc
== DYNAMIC_PC
) {
5416 /* dynamic NPC: already stored */
5417 } else if (npc
& JUMP_PC
) {
5418 /* jump PC: use 'cond' and the jump targets of the translation */
5420 env
->npc
= npc
& ~3;