4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E) qemu_build_not_reached()
41 # define gen_helper_rett(E) qemu_build_not_reached()
42 # define gen_helper_power_down(E) qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
45 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
46 # define gen_helper_done(E) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
49 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
50 # define gen_helper_restored(E) qemu_build_not_reached()
51 # define gen_helper_retry(E) qemu_build_not_reached()
52 # define gen_helper_saved(E) qemu_build_not_reached()
53 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
54 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
55 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
56 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
57 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
58 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
59 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
60 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
61 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
62 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
63 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
64 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
90 /* Dynamic PC, must exit to main loop. */
92 /* Dynamic PC, one of two values according to jump_pc[T2]. */
94 /* Dynamic PC, may lookup next TB. */
95 #define DYNAMIC_PC_LOOKUP 3
97 #define DISAS_EXIT DISAS_TARGET_0
99 /* global register indexes */
100 static TCGv_ptr cpu_regwptr
;
101 static TCGv cpu_pc
, cpu_npc
;
102 static TCGv cpu_regs
[32];
105 static TCGv cpu_cond
;
106 static TCGv cpu_cc_N
;
107 static TCGv cpu_cc_V
;
108 static TCGv cpu_icc_Z
;
109 static TCGv cpu_icc_C
;
110 #ifdef TARGET_SPARC64
111 static TCGv cpu_xcc_Z
;
112 static TCGv cpu_xcc_C
;
113 static TCGv_i32 cpu_fprs
;
116 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
117 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
120 #ifdef TARGET_SPARC64
121 #define cpu_cc_Z cpu_xcc_Z
122 #define cpu_cc_C cpu_xcc_C
124 #define cpu_cc_Z cpu_icc_Z
125 #define cpu_cc_C cpu_icc_C
126 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
127 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
130 /* Floating point registers */
131 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
132 static TCGv_i32 cpu_fcc
[TARGET_FCCREGS
];
134 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
135 #ifdef TARGET_SPARC64
136 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
137 # define env64_field_offsetof(X) env_field_offsetof(X)
139 # define env32_field_offsetof(X) env_field_offsetof(X)
140 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
143 typedef struct DisasCompare
{
149 typedef struct DisasDelayException
{
150 struct DisasDelayException
*next
;
153 /* Saved state at parent insn. */
156 } DisasDelayException
;
158 typedef struct DisasContext
{
159 DisasContextBase base
;
160 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
161 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
163 /* Used when JUMP_PC value is used. */
165 target_ulong jump_pc
[2];
170 bool address_mask_32bit
;
171 #ifndef CONFIG_USER_ONLY
173 #ifdef TARGET_SPARC64
179 #ifdef TARGET_SPARC64
183 DisasDelayException
*delay_excp_list
;
186 // This function uses non-native bit order
187 #define GET_FIELD(X, FROM, TO) \
188 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
190 // This function uses the order in the manuals, i.e. bit 0 is 2^0
191 #define GET_FIELD_SP(X, FROM, TO) \
192 GET_FIELD(X, 31 - (TO), 31 - (FROM))
194 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
195 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
197 #ifdef TARGET_SPARC64
198 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
199 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
201 #define DFPREG(r) (r & 0x1e)
202 #define QFPREG(r) (r & 0x1c)
205 #define UA2005_HTRAP_MASK 0xff
206 #define V8_TRAP_MASK 0x7f
208 #define IS_IMM (insn & (1<<13))
210 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
212 #if defined(TARGET_SPARC64)
213 int bit
= (rd
< 32) ? 1 : 2;
214 /* If we know we've already set this bit within the TB,
215 we can avoid setting it again. */
216 if (!(dc
->fprs_dirty
& bit
)) {
217 dc
->fprs_dirty
|= bit
;
218 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
223 /* floating point registers moves */
224 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
226 TCGv_i32 ret
= tcg_temp_new_i32();
228 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
230 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
235 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
237 TCGv_i64 t
= tcg_temp_new_i64();
239 tcg_gen_extu_i32_i64(t
, v
);
240 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
241 (dst
& 1 ? 0 : 32), 32);
242 gen_update_fprs_dirty(dc
, dst
);
245 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
248 return cpu_fpr
[src
/ 2];
251 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
254 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
255 gen_update_fprs_dirty(dc
, dst
);
258 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
260 return cpu_fpr
[DFPREG(dst
) / 2];
263 static TCGv_i128
gen_load_fpr_Q(DisasContext
*dc
, unsigned int src
)
265 TCGv_i128 ret
= tcg_temp_new_i128();
268 tcg_gen_concat_i64_i128(ret
, cpu_fpr
[src
/ 2 + 1], cpu_fpr
[src
/ 2]);
272 static void gen_store_fpr_Q(DisasContext
*dc
, unsigned int dst
, TCGv_i128 v
)
275 tcg_gen_extr_i128_i64(cpu_fpr
[dst
/ 2 + 1], cpu_fpr
[dst
/ 2], v
);
276 gen_update_fprs_dirty(dc
, dst
);
280 #ifdef CONFIG_USER_ONLY
281 #define supervisor(dc) 0
282 #define hypervisor(dc) 0
284 #ifdef TARGET_SPARC64
285 #define hypervisor(dc) (dc->hypervisor)
286 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
288 #define supervisor(dc) (dc->supervisor)
289 #define hypervisor(dc) 0
293 #if !defined(TARGET_SPARC64)
294 # define AM_CHECK(dc) false
295 #elif defined(TARGET_ABI32)
296 # define AM_CHECK(dc) true
297 #elif defined(CONFIG_USER_ONLY)
298 # define AM_CHECK(dc) false
300 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
303 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
306 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
310 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
312 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
315 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
319 return cpu_regs
[reg
];
321 TCGv t
= tcg_temp_new();
322 tcg_gen_movi_tl(t
, 0);
327 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
331 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
335 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
339 return cpu_regs
[reg
];
341 return tcg_temp_new();
345 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
347 return translator_use_goto_tb(&s
->base
, pc
) &&
348 translator_use_goto_tb(&s
->base
, npc
);
351 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
352 target_ulong pc
, target_ulong npc
)
354 if (use_goto_tb(s
, pc
, npc
)) {
355 /* jump to same page: we can use a direct jump */
356 tcg_gen_goto_tb(tb_num
);
357 tcg_gen_movi_tl(cpu_pc
, pc
);
358 tcg_gen_movi_tl(cpu_npc
, npc
);
359 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
361 /* jump to another page: we can use an indirect jump */
362 tcg_gen_movi_tl(cpu_pc
, pc
);
363 tcg_gen_movi_tl(cpu_npc
, npc
);
364 tcg_gen_lookup_and_goto_ptr();
368 static TCGv
gen_carry32(void)
370 if (TARGET_LONG_BITS
== 64) {
371 TCGv t
= tcg_temp_new();
372 tcg_gen_extract_tl(t
, cpu_icc_C
, 32, 1);
378 static void gen_op_addcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
380 TCGv z
= tcg_constant_tl(0);
383 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
384 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
386 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
388 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
389 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src2
);
390 tcg_gen_andc_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
391 if (TARGET_LONG_BITS
== 64) {
393 * Carry-in to bit 32 is result ^ src1 ^ src2.
394 * We already have the src xor term in Z, from computation of V.
396 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
397 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
399 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
400 tcg_gen_mov_tl(dst
, cpu_cc_N
);
403 static void gen_op_addcc(TCGv dst
, TCGv src1
, TCGv src2
)
405 gen_op_addcc_int(dst
, src1
, src2
, NULL
);
408 static void gen_op_taddcc(TCGv dst
, TCGv src1
, TCGv src2
)
410 TCGv t
= tcg_temp_new();
412 /* Save the tag bits around modification of dst. */
413 tcg_gen_or_tl(t
, src1
, src2
);
415 gen_op_addcc(dst
, src1
, src2
);
417 /* Incorprate tag bits into icc.V */
418 tcg_gen_andi_tl(t
, t
, 3);
419 tcg_gen_neg_tl(t
, t
);
420 tcg_gen_ext32u_tl(t
, t
);
421 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
424 static void gen_op_addc(TCGv dst
, TCGv src1
, TCGv src2
)
426 tcg_gen_add_tl(dst
, src1
, src2
);
427 tcg_gen_add_tl(dst
, dst
, gen_carry32());
430 static void gen_op_addccc(TCGv dst
, TCGv src1
, TCGv src2
)
432 gen_op_addcc_int(dst
, src1
, src2
, gen_carry32());
435 static void gen_op_subcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
437 TCGv z
= tcg_constant_tl(0);
440 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
441 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
443 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
445 tcg_gen_neg_tl(cpu_cc_C
, cpu_cc_C
);
446 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
447 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src1
);
448 tcg_gen_and_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
449 #ifdef TARGET_SPARC64
450 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
451 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
453 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
454 tcg_gen_mov_tl(dst
, cpu_cc_N
);
457 static void gen_op_subcc(TCGv dst
, TCGv src1
, TCGv src2
)
459 gen_op_subcc_int(dst
, src1
, src2
, NULL
);
462 static void gen_op_tsubcc(TCGv dst
, TCGv src1
, TCGv src2
)
464 TCGv t
= tcg_temp_new();
466 /* Save the tag bits around modification of dst. */
467 tcg_gen_or_tl(t
, src1
, src2
);
469 gen_op_subcc(dst
, src1
, src2
);
471 /* Incorprate tag bits into icc.V */
472 tcg_gen_andi_tl(t
, t
, 3);
473 tcg_gen_neg_tl(t
, t
);
474 tcg_gen_ext32u_tl(t
, t
);
475 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
478 static void gen_op_subc(TCGv dst
, TCGv src1
, TCGv src2
)
480 tcg_gen_sub_tl(dst
, src1
, src2
);
481 tcg_gen_sub_tl(dst
, dst
, gen_carry32());
484 static void gen_op_subccc(TCGv dst
, TCGv src1
, TCGv src2
)
486 gen_op_subcc_int(dst
, src1
, src2
, gen_carry32());
489 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
491 TCGv zero
= tcg_constant_tl(0);
492 TCGv one
= tcg_constant_tl(1);
493 TCGv t_src1
= tcg_temp_new();
494 TCGv t_src2
= tcg_temp_new();
495 TCGv t0
= tcg_temp_new();
497 tcg_gen_ext32u_tl(t_src1
, src1
);
498 tcg_gen_ext32u_tl(t_src2
, src2
);
504 tcg_gen_movcond_tl(TCG_COND_TSTEQ
, t_src2
, cpu_y
, one
, zero
, t_src2
);
508 * y = (b2 << 31) | (y >> 1);
510 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
511 tcg_gen_deposit_tl(cpu_y
, t0
, src1
, 31, 1);
514 tcg_gen_xor_tl(t0
, cpu_cc_N
, cpu_cc_V
);
517 * src1 = (b1 << 31) | (src1 >> 1)
519 tcg_gen_andi_tl(t0
, t0
, 1u << 31);
520 tcg_gen_shri_tl(t_src1
, t_src1
, 1);
521 tcg_gen_or_tl(t_src1
, t_src1
, t0
);
523 gen_op_addcc(dst
, t_src1
, t_src2
);
526 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
528 #if TARGET_LONG_BITS == 32
530 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
532 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
535 TCGv t0
= tcg_temp_new_i64();
536 TCGv t1
= tcg_temp_new_i64();
539 tcg_gen_ext32s_i64(t0
, src1
);
540 tcg_gen_ext32s_i64(t1
, src2
);
542 tcg_gen_ext32u_i64(t0
, src1
);
543 tcg_gen_ext32u_i64(t1
, src2
);
546 tcg_gen_mul_i64(dst
, t0
, t1
);
547 tcg_gen_shri_i64(cpu_y
, dst
, 32);
551 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
553 /* zero-extend truncated operands before multiplication */
554 gen_op_multiply(dst
, src1
, src2
, 0);
557 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
559 /* sign-extend truncated operands before multiplication */
560 gen_op_multiply(dst
, src1
, src2
, 1);
563 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
565 #ifdef TARGET_SPARC64
566 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
567 tcg_gen_ext32s_tl(dst
, dst
);
569 TCGv_i64 t64
= tcg_temp_new_i64();
570 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
571 tcg_gen_trunc_i64_tl(dst
, t64
);
575 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
579 #ifdef TARGET_SPARC64
582 t64
= tcg_temp_new_i64();
585 gen_helper_udiv(t64
, tcg_env
, src1
, src2
);
587 #ifdef TARGET_SPARC64
588 tcg_gen_ext32u_tl(cpu_cc_N
, t64
);
589 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
590 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
591 tcg_gen_movi_tl(cpu_icc_C
, 0);
593 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
595 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
596 tcg_gen_movi_tl(cpu_cc_C
, 0);
597 tcg_gen_mov_tl(dst
, cpu_cc_N
);
600 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
604 #ifdef TARGET_SPARC64
607 t64
= tcg_temp_new_i64();
610 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
612 #ifdef TARGET_SPARC64
613 tcg_gen_ext32s_tl(cpu_cc_N
, t64
);
614 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
615 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
616 tcg_gen_movi_tl(cpu_icc_C
, 0);
618 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
620 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
621 tcg_gen_movi_tl(cpu_cc_C
, 0);
622 tcg_gen_mov_tl(dst
, cpu_cc_N
);
625 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
627 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
630 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
632 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
635 static void gen_op_popc(TCGv dst
, TCGv src1
, TCGv src2
)
637 tcg_gen_ctpop_tl(dst
, src2
);
640 #ifndef TARGET_SPARC64
641 static void gen_helper_array8(TCGv dst
, TCGv src1
, TCGv src2
)
643 g_assert_not_reached();
647 static void gen_op_array16(TCGv dst
, TCGv src1
, TCGv src2
)
649 gen_helper_array8(dst
, src1
, src2
);
650 tcg_gen_shli_tl(dst
, dst
, 1);
653 static void gen_op_array32(TCGv dst
, TCGv src1
, TCGv src2
)
655 gen_helper_array8(dst
, src1
, src2
);
656 tcg_gen_shli_tl(dst
, dst
, 2);
659 static void gen_op_fpack16(TCGv_i32 dst
, TCGv_i64 src
)
661 #ifdef TARGET_SPARC64
662 gen_helper_fpack16(dst
, cpu_gsr
, src
);
664 g_assert_not_reached();
668 static void gen_op_fpackfix(TCGv_i32 dst
, TCGv_i64 src
)
670 #ifdef TARGET_SPARC64
671 gen_helper_fpackfix(dst
, cpu_gsr
, src
);
673 g_assert_not_reached();
677 static void gen_op_fpack32(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
679 #ifdef TARGET_SPARC64
680 gen_helper_fpack32(dst
, cpu_gsr
, src1
, src2
);
682 g_assert_not_reached();
686 static void gen_op_faligndata(TCGv_i64 dst
, TCGv_i64 s1
, TCGv_i64 s2
)
688 #ifdef TARGET_SPARC64
693 shift
= tcg_temp_new();
695 tcg_gen_andi_tl(shift
, cpu_gsr
, 7);
696 tcg_gen_shli_tl(shift
, shift
, 3);
697 tcg_gen_shl_tl(t1
, s1
, shift
);
700 * A shift of 64 does not produce 0 in TCG. Divide this into a
701 * shift of (up to 63) followed by a constant shift of 1.
703 tcg_gen_xori_tl(shift
, shift
, 63);
704 tcg_gen_shr_tl(t2
, s2
, shift
);
705 tcg_gen_shri_tl(t2
, t2
, 1);
707 tcg_gen_or_tl(dst
, t1
, t2
);
709 g_assert_not_reached();
713 static void gen_op_bshuffle(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
715 #ifdef TARGET_SPARC64
716 gen_helper_bshuffle(dst
, cpu_gsr
, src1
, src2
);
718 g_assert_not_reached();
722 static void finishing_insn(DisasContext
*dc
)
725 * From here, there is no future path through an unwinding exception.
726 * If the current insn cannot raise an exception, the computation of
727 * cpu_cond may be able to be elided.
729 if (dc
->cpu_cond_live
) {
730 tcg_gen_discard_tl(cpu_cond
);
731 dc
->cpu_cond_live
= false;
735 static void gen_generic_branch(DisasContext
*dc
)
737 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
738 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
739 TCGv c2
= tcg_constant_tl(dc
->jump
.c2
);
741 tcg_gen_movcond_tl(dc
->jump
.cond
, cpu_npc
, dc
->jump
.c1
, c2
, npc0
, npc1
);
744 /* call this function before using the condition register as it may
745 have been set for a jump */
746 static void flush_cond(DisasContext
*dc
)
748 if (dc
->npc
== JUMP_PC
) {
749 gen_generic_branch(dc
);
750 dc
->npc
= DYNAMIC_PC_LOOKUP
;
754 static void save_npc(DisasContext
*dc
)
759 gen_generic_branch(dc
);
760 dc
->npc
= DYNAMIC_PC_LOOKUP
;
763 case DYNAMIC_PC_LOOKUP
:
766 g_assert_not_reached();
769 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
773 static void save_state(DisasContext
*dc
)
775 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
779 static void gen_exception(DisasContext
*dc
, int which
)
783 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
784 dc
->base
.is_jmp
= DISAS_NORETURN
;
787 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
789 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
791 e
->next
= dc
->delay_excp_list
;
792 dc
->delay_excp_list
= e
;
794 e
->lab
= gen_new_label();
797 /* Caller must have used flush_cond before branch. */
798 assert(e
->npc
!= JUMP_PC
);
804 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
806 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
809 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
811 TCGv t
= tcg_temp_new();
814 tcg_gen_andi_tl(t
, addr
, mask
);
817 lab
= delay_exception(dc
, TT_UNALIGNED
);
818 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
821 static void gen_mov_pc_npc(DisasContext
*dc
)
828 gen_generic_branch(dc
);
829 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
830 dc
->pc
= DYNAMIC_PC_LOOKUP
;
833 case DYNAMIC_PC_LOOKUP
:
834 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
838 g_assert_not_reached();
845 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
850 cmp
->c1
= t1
= tcg_temp_new();
854 case 0x0: /* never */
855 cmp
->cond
= TCG_COND_NEVER
;
856 cmp
->c1
= tcg_constant_tl(0);
859 case 0x1: /* eq: Z */
860 cmp
->cond
= TCG_COND_EQ
;
861 if (TARGET_LONG_BITS
== 32 || xcc
) {
862 tcg_gen_mov_tl(t1
, cpu_cc_Z
);
864 tcg_gen_ext32u_tl(t1
, cpu_icc_Z
);
868 case 0x2: /* le: Z | (N ^ V) */
871 * cc_Z || (N ^ V) < 0 NE
872 * cc_Z && !((N ^ V) < 0) EQ
873 * cc_Z & ~((N ^ V) >> TLB) EQ
875 cmp
->cond
= TCG_COND_EQ
;
876 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
877 tcg_gen_sextract_tl(t1
, t1
, xcc
? 63 : 31, 1);
878 tcg_gen_andc_tl(t1
, xcc
? cpu_cc_Z
: cpu_icc_Z
, t1
);
879 if (TARGET_LONG_BITS
== 64 && !xcc
) {
880 tcg_gen_ext32u_tl(t1
, t1
);
884 case 0x3: /* lt: N ^ V */
885 cmp
->cond
= TCG_COND_LT
;
886 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
887 if (TARGET_LONG_BITS
== 64 && !xcc
) {
888 tcg_gen_ext32s_tl(t1
, t1
);
892 case 0x4: /* leu: Z | C */
895 * cc_Z == 0 || cc_C != 0 NE
896 * cc_Z != 0 && cc_C == 0 EQ
897 * cc_Z & (cc_C ? 0 : -1) EQ
898 * cc_Z & (cc_C - 1) EQ
900 cmp
->cond
= TCG_COND_EQ
;
901 if (TARGET_LONG_BITS
== 32 || xcc
) {
902 tcg_gen_subi_tl(t1
, cpu_cc_C
, 1);
903 tcg_gen_and_tl(t1
, t1
, cpu_cc_Z
);
905 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
906 tcg_gen_subi_tl(t1
, t1
, 1);
907 tcg_gen_and_tl(t1
, t1
, cpu_icc_Z
);
908 tcg_gen_ext32u_tl(t1
, t1
);
912 case 0x5: /* ltu: C */
913 cmp
->cond
= TCG_COND_NE
;
914 if (TARGET_LONG_BITS
== 32 || xcc
) {
915 tcg_gen_mov_tl(t1
, cpu_cc_C
);
917 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
921 case 0x6: /* neg: N */
922 cmp
->cond
= TCG_COND_LT
;
923 if (TARGET_LONG_BITS
== 32 || xcc
) {
924 tcg_gen_mov_tl(t1
, cpu_cc_N
);
926 tcg_gen_ext32s_tl(t1
, cpu_cc_N
);
930 case 0x7: /* vs: V */
931 cmp
->cond
= TCG_COND_LT
;
932 if (TARGET_LONG_BITS
== 32 || xcc
) {
933 tcg_gen_mov_tl(t1
, cpu_cc_V
);
935 tcg_gen_ext32s_tl(t1
, cpu_cc_V
);
940 cmp
->cond
= tcg_invert_cond(cmp
->cond
);
944 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
946 TCGv_i32 fcc
= cpu_fcc
[cc
];
960 tcond
= TCG_COND_NEVER
;
962 case 0x1: /* fbne : !0 */
965 case 0x2: /* fblg : 1 or 2 */
966 /* fcc in {1,2} - 1 -> fcc in {0,1} */
967 c1
= tcg_temp_new_i32();
968 tcg_gen_addi_i32(c1
, fcc
, -1);
970 tcond
= TCG_COND_LEU
;
972 case 0x3: /* fbul : 1 or 3 */
973 c1
= tcg_temp_new_i32();
974 tcg_gen_andi_i32(c1
, fcc
, 1);
977 case 0x4: /* fbl : 1 */
981 case 0x5: /* fbug : 2 or 3 */
983 tcond
= TCG_COND_GEU
;
985 case 0x6: /* fbg : 2 */
989 case 0x7: /* fbu : 3 */
995 tcond
= tcg_invert_cond(tcond
);
1000 cmp
->c1
= tcg_temp_new();
1001 tcg_gen_extu_i32_tl(cmp
->c1
, c1
);
1004 static bool gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1006 static const TCGCond cond_reg
[4] = {
1007 TCG_COND_NEVER
, /* reserved */
1014 if ((cond
& 3) == 0) {
1017 tcond
= cond_reg
[cond
& 3];
1019 tcond
= tcg_invert_cond(tcond
);
1023 cmp
->c1
= tcg_temp_new();
1025 tcg_gen_mov_tl(cmp
->c1
, r_src
);
1029 static void gen_op_clear_ieee_excp_and_FTT(void)
1031 tcg_gen_st_i32(tcg_constant_i32(0), tcg_env
,
1032 offsetof(CPUSPARCState
, fsr_cexc_ftt
));
1035 static void gen_op_fmovs(TCGv_i32 dst
, TCGv_i32 src
)
1037 gen_op_clear_ieee_excp_and_FTT();
1038 tcg_gen_mov_i32(dst
, src
);
1041 static void gen_op_fnegs(TCGv_i32 dst
, TCGv_i32 src
)
1043 gen_op_clear_ieee_excp_and_FTT();
1044 tcg_gen_xori_i32(dst
, src
, 1u << 31);
1047 static void gen_op_fabss(TCGv_i32 dst
, TCGv_i32 src
)
1049 gen_op_clear_ieee_excp_and_FTT();
1050 tcg_gen_andi_i32(dst
, src
, ~(1u << 31));
1053 static void gen_op_fmovd(TCGv_i64 dst
, TCGv_i64 src
)
1055 gen_op_clear_ieee_excp_and_FTT();
1056 tcg_gen_mov_i64(dst
, src
);
1059 static void gen_op_fnegd(TCGv_i64 dst
, TCGv_i64 src
)
1061 gen_op_clear_ieee_excp_and_FTT();
1062 tcg_gen_xori_i64(dst
, src
, 1ull << 63);
1065 static void gen_op_fabsd(TCGv_i64 dst
, TCGv_i64 src
)
1067 gen_op_clear_ieee_excp_and_FTT();
1068 tcg_gen_andi_i64(dst
, src
, ~(1ull << 63));
1071 static void gen_op_fnegq(TCGv_i128 dst
, TCGv_i128 src
)
1073 TCGv_i64 l
= tcg_temp_new_i64();
1074 TCGv_i64 h
= tcg_temp_new_i64();
1076 tcg_gen_extr_i128_i64(l
, h
, src
);
1077 tcg_gen_xori_i64(h
, h
, 1ull << 63);
1078 tcg_gen_concat_i64_i128(dst
, l
, h
);
1081 static void gen_op_fabsq(TCGv_i128 dst
, TCGv_i128 src
)
1083 TCGv_i64 l
= tcg_temp_new_i64();
1084 TCGv_i64 h
= tcg_temp_new_i64();
1086 tcg_gen_extr_i128_i64(l
, h
, src
);
1087 tcg_gen_andi_i64(h
, h
, ~(1ull << 63));
1088 tcg_gen_concat_i64_i128(dst
, l
, h
);
1091 static void gen_op_fpexception_im(DisasContext
*dc
, int ftt
)
1094 * CEXC is only set when succesfully completing an FPop,
1095 * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
1096 * Thus we can simply store FTT into this field.
1098 tcg_gen_st_i32(tcg_constant_i32(ftt
), tcg_env
,
1099 offsetof(CPUSPARCState
, fsr_cexc_ftt
));
1100 gen_exception(dc
, TT_FP_EXCP
);
1103 static int gen_trap_ifnofpu(DisasContext
*dc
)
1105 #if !defined(CONFIG_USER_ONLY)
1106 if (!dc
->fpu_enabled
) {
1107 gen_exception(dc
, TT_NFPU_INSN
);
1136 * For asi == -1, treat as non-asi.
1137 * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1139 static DisasASI
resolve_asi(DisasContext
*dc
, int asi
, MemOp memop
)
1141 ASIType type
= GET_ASI_HELPER
;
1142 int mem_idx
= dc
->mem_idx
;
1145 /* Artificial "non-asi" case. */
1146 type
= GET_ASI_DIRECT
;
1150 #ifndef TARGET_SPARC64
1151 /* Before v9, all asis are immediate and privileged. */
1153 gen_exception(dc
, TT_ILL_INSN
);
1154 type
= GET_ASI_EXCP
;
1155 } else if (supervisor(dc
)
1156 /* Note that LEON accepts ASI_USERDATA in user mode, for
1157 use with CASA. Also note that previous versions of
1158 QEMU allowed (and old versions of gcc emitted) ASI_P
1159 for LEON, which is incorrect. */
1160 || (asi
== ASI_USERDATA
1161 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1163 case ASI_USERDATA
: /* User data access */
1164 mem_idx
= MMU_USER_IDX
;
1165 type
= GET_ASI_DIRECT
;
1167 case ASI_KERNELDATA
: /* Supervisor data access */
1168 mem_idx
= MMU_KERNEL_IDX
;
1169 type
= GET_ASI_DIRECT
;
1171 case ASI_USERTXT
: /* User text access */
1172 mem_idx
= MMU_USER_IDX
;
1173 type
= GET_ASI_CODE
;
1175 case ASI_KERNELTXT
: /* Supervisor text access */
1176 mem_idx
= MMU_KERNEL_IDX
;
1177 type
= GET_ASI_CODE
;
1179 case ASI_M_BYPASS
: /* MMU passthrough */
1180 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1181 mem_idx
= MMU_PHYS_IDX
;
1182 type
= GET_ASI_DIRECT
;
1184 case ASI_M_BCOPY
: /* Block copy, sta access */
1185 mem_idx
= MMU_KERNEL_IDX
;
1186 type
= GET_ASI_BCOPY
;
1188 case ASI_M_BFILL
: /* Block fill, stda access */
1189 mem_idx
= MMU_KERNEL_IDX
;
1190 type
= GET_ASI_BFILL
;
1194 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1195 * permissions check in get_physical_address(..).
1197 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1199 gen_exception(dc
, TT_PRIV_INSN
);
1200 type
= GET_ASI_EXCP
;
1206 /* With v9, all asis below 0x80 are privileged. */
1207 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1208 down that bit into DisasContext. For the moment that's ok,
1209 since the direct implementations below doesn't have any ASIs
1210 in the restricted [0x30, 0x7f] range, and the check will be
1211 done properly in the helper. */
1212 if (!supervisor(dc
) && asi
< 0x80) {
1213 gen_exception(dc
, TT_PRIV_ACT
);
1214 type
= GET_ASI_EXCP
;
1217 case ASI_REAL
: /* Bypass */
1218 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1219 case ASI_REAL_L
: /* Bypass LE */
1220 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1221 case ASI_TWINX_REAL
: /* Real address, twinx */
1222 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1223 case ASI_QUAD_LDD_PHYS
:
1224 case ASI_QUAD_LDD_PHYS_L
:
1225 mem_idx
= MMU_PHYS_IDX
;
1227 case ASI_N
: /* Nucleus */
1228 case ASI_NL
: /* Nucleus LE */
1231 case ASI_NUCLEUS_QUAD_LDD
:
1232 case ASI_NUCLEUS_QUAD_LDD_L
:
1233 if (hypervisor(dc
)) {
1234 mem_idx
= MMU_PHYS_IDX
;
1236 mem_idx
= MMU_NUCLEUS_IDX
;
1239 case ASI_AIUP
: /* As if user primary */
1240 case ASI_AIUPL
: /* As if user primary LE */
1241 case ASI_TWINX_AIUP
:
1242 case ASI_TWINX_AIUP_L
:
1243 case ASI_BLK_AIUP_4V
:
1244 case ASI_BLK_AIUP_L_4V
:
1247 mem_idx
= MMU_USER_IDX
;
1249 case ASI_AIUS
: /* As if user secondary */
1250 case ASI_AIUSL
: /* As if user secondary LE */
1251 case ASI_TWINX_AIUS
:
1252 case ASI_TWINX_AIUS_L
:
1253 case ASI_BLK_AIUS_4V
:
1254 case ASI_BLK_AIUS_L_4V
:
1257 mem_idx
= MMU_USER_SECONDARY_IDX
;
1259 case ASI_S
: /* Secondary */
1260 case ASI_SL
: /* Secondary LE */
1263 case ASI_BLK_COMMIT_S
:
1270 if (mem_idx
== MMU_USER_IDX
) {
1271 mem_idx
= MMU_USER_SECONDARY_IDX
;
1272 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1273 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1276 case ASI_P
: /* Primary */
1277 case ASI_PL
: /* Primary LE */
1280 case ASI_BLK_COMMIT_P
:
1304 type
= GET_ASI_DIRECT
;
1306 case ASI_TWINX_REAL
:
1307 case ASI_TWINX_REAL_L
:
1310 case ASI_TWINX_AIUP
:
1311 case ASI_TWINX_AIUP_L
:
1312 case ASI_TWINX_AIUS
:
1313 case ASI_TWINX_AIUS_L
:
1318 case ASI_QUAD_LDD_PHYS
:
1319 case ASI_QUAD_LDD_PHYS_L
:
1320 case ASI_NUCLEUS_QUAD_LDD
:
1321 case ASI_NUCLEUS_QUAD_LDD_L
:
1322 type
= GET_ASI_DTWINX
;
1324 case ASI_BLK_COMMIT_P
:
1325 case ASI_BLK_COMMIT_S
:
1326 case ASI_BLK_AIUP_4V
:
1327 case ASI_BLK_AIUP_L_4V
:
1330 case ASI_BLK_AIUS_4V
:
1331 case ASI_BLK_AIUS_L_4V
:
1338 type
= GET_ASI_BLOCK
;
1345 type
= GET_ASI_SHORT
;
1352 type
= GET_ASI_SHORT
;
1355 /* The little-endian asis all have bit 3 set. */
1363 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/* v8 user-mode builds have no ASI helpers; these stubs satisfy the
   compile-time references, which are unreachable at runtime. */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1380 static void gen_ld_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1385 case GET_ASI_DTWINX
: /* Reserved for ldda. */
1386 gen_exception(dc
, TT_ILL_INSN
);
1388 case GET_ASI_DIRECT
:
1389 tcg_gen_qemu_ld_tl(dst
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1393 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1395 MemOpIdx oi
= make_memop_idx(da
->memop
, da
->mem_idx
);
1396 TCGv_i64 t64
= tcg_temp_new_i64();
1398 gen_helper_ld_code(t64
, tcg_env
, addr
, tcg_constant_i32(oi
));
1399 tcg_gen_trunc_i64_tl(dst
, t64
);
1403 g_assert_not_reached();
1408 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1409 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1412 #ifdef TARGET_SPARC64
1413 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
1416 TCGv_i64 t64
= tcg_temp_new_i64();
1417 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1418 tcg_gen_trunc_i64_tl(dst
, t64
);
1426 static void gen_st_asi(DisasContext
*dc
, DisasASI
*da
, TCGv src
, TCGv addr
)
1432 case GET_ASI_DTWINX
: /* Reserved for stda. */
1433 if (TARGET_LONG_BITS
== 32) {
1434 gen_exception(dc
, TT_ILL_INSN
);
1436 } else if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
1437 /* Pre OpenSPARC CPUs don't have these */
1438 gen_exception(dc
, TT_ILL_INSN
);
1441 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1444 case GET_ASI_DIRECT
:
1445 tcg_gen_qemu_st_tl(src
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1449 assert(TARGET_LONG_BITS
== 32);
1451 * Copy 32 bytes from the address in SRC to ADDR.
1453 * From Ross RT625 hyperSPARC manual, section 4.6:
1454 * "Block Copy and Block Fill will work only on cache line boundaries."
1456 * It does not specify if an unaliged address is truncated or trapped.
1457 * Previous qemu behaviour was to truncate to 4 byte alignment, which
1458 * is obviously wrong. The only place I can see this used is in the
1459 * Linux kernel which begins with page alignment, advancing by 32,
1460 * so is always aligned. Assume truncation as the simpler option.
1462 * Since the loads and stores are paired, allow the copy to happen
1463 * in the host endianness. The copy need not be atomic.
1466 MemOp mop
= MO_128
| MO_ATOM_IFALIGN_PAIR
;
1467 TCGv saddr
= tcg_temp_new();
1468 TCGv daddr
= tcg_temp_new();
1469 TCGv_i128 tmp
= tcg_temp_new_i128();
1471 tcg_gen_andi_tl(saddr
, src
, -32);
1472 tcg_gen_andi_tl(daddr
, addr
, -32);
1473 tcg_gen_qemu_ld_i128(tmp
, saddr
, da
->mem_idx
, mop
);
1474 tcg_gen_qemu_st_i128(tmp
, daddr
, da
->mem_idx
, mop
);
1475 tcg_gen_addi_tl(saddr
, saddr
, 16);
1476 tcg_gen_addi_tl(daddr
, daddr
, 16);
1477 tcg_gen_qemu_ld_i128(tmp
, saddr
, da
->mem_idx
, mop
);
1478 tcg_gen_qemu_st_i128(tmp
, daddr
, da
->mem_idx
, mop
);
1484 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1485 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1488 #ifdef TARGET_SPARC64
1489 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
1492 TCGv_i64 t64
= tcg_temp_new_i64();
1493 tcg_gen_extu_tl_i64(t64
, src
);
1494 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
1498 /* A write to a TLB register may alter page maps. End the TB. */
1499 dc
->npc
= DYNAMIC_PC
;
1505 static void gen_swap_asi(DisasContext
*dc
, DisasASI
*da
,
1506 TCGv dst
, TCGv src
, TCGv addr
)
1511 case GET_ASI_DIRECT
:
1512 tcg_gen_atomic_xchg_tl(dst
, addr
, src
,
1513 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1516 /* ??? Should be DAE_invalid_asi. */
1517 gen_exception(dc
, TT_DATA_ACCESS
);
1522 static void gen_cas_asi(DisasContext
*dc
, DisasASI
*da
,
1523 TCGv oldv
, TCGv newv
, TCGv cmpv
, TCGv addr
)
1528 case GET_ASI_DIRECT
:
1529 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, newv
,
1530 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1533 /* ??? Should be DAE_invalid_asi. */
1534 gen_exception(dc
, TT_DATA_ACCESS
);
1539 static void gen_ldstub_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1544 case GET_ASI_DIRECT
:
1545 tcg_gen_atomic_xchg_tl(dst
, addr
, tcg_constant_tl(0xff),
1546 da
->mem_idx
, MO_UB
);
1549 /* ??? In theory, this should be raise DAE_invalid_asi.
1550 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1551 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
1552 gen_helper_exit_atomic(tcg_env
);
1554 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1555 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
1559 t64
= tcg_temp_new_i64();
1560 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1562 s64
= tcg_constant_i64(0xff);
1563 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
1565 tcg_gen_trunc_i64_tl(dst
, t64
);
1568 dc
->npc
= DYNAMIC_PC
;
1574 static void gen_ldf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1577 MemOp memop
= da
->memop
;
1578 MemOp size
= memop
& MO_SIZE
;
1583 /* TODO: Use 128-bit load/store below. */
1584 if (size
== MO_128
) {
1585 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1592 case GET_ASI_DIRECT
:
1593 memop
|= MO_ALIGN_4
;
1596 d32
= tcg_temp_new_i32();
1597 tcg_gen_qemu_ld_i32(d32
, addr
, da
->mem_idx
, memop
);
1598 gen_store_fpr_F(dc
, rd
, d32
);
1602 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
, memop
);
1606 d64
= tcg_temp_new_i64();
1607 tcg_gen_qemu_ld_i64(d64
, addr
, da
->mem_idx
, memop
);
1608 addr_tmp
= tcg_temp_new();
1609 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1610 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
1611 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1614 g_assert_not_reached();
1619 /* Valid for lddfa on aligned registers only. */
1620 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
1621 /* The first operation checks required alignment. */
1622 addr_tmp
= tcg_temp_new();
1623 for (int i
= 0; ; ++i
) {
1624 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
1625 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
1629 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1633 gen_exception(dc
, TT_ILL_INSN
);
1638 /* Valid for lddfa only. */
1639 if (orig_size
== MO_64
) {
1640 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1643 gen_exception(dc
, TT_ILL_INSN
);
1649 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1650 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
1653 /* According to the table in the UA2011 manual, the only
1654 other asis that are valid for ldfa/lddfa/ldqfa are
1655 the NO_FAULT asis. We still need a helper for these,
1656 but we can just use the integer asi helper for them. */
1659 d64
= tcg_temp_new_i64();
1660 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1661 d32
= tcg_temp_new_i32();
1662 tcg_gen_extrl_i64_i32(d32
, d64
);
1663 gen_store_fpr_F(dc
, rd
, d32
);
1666 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
,
1670 d64
= tcg_temp_new_i64();
1671 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1672 addr_tmp
= tcg_temp_new();
1673 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1674 gen_helper_ld_asi(cpu_fpr
[rd
/ 2 + 1], tcg_env
, addr_tmp
,
1676 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1679 g_assert_not_reached();
1686 static void gen_stf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1689 MemOp memop
= da
->memop
;
1690 MemOp size
= memop
& MO_SIZE
;
1694 /* TODO: Use 128-bit load/store below. */
1695 if (size
== MO_128
) {
1696 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1703 case GET_ASI_DIRECT
:
1704 memop
|= MO_ALIGN_4
;
1707 d32
= gen_load_fpr_F(dc
, rd
);
1708 tcg_gen_qemu_st_i32(d32
, addr
, da
->mem_idx
, memop
| MO_ALIGN
);
1711 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1712 memop
| MO_ALIGN_4
);
1715 /* Only 4-byte alignment required. However, it is legal for the
1716 cpu to signal the alignment fault, and the OS trap handler is
1717 required to fix it up. Requiring 16-byte alignment here avoids
1718 having to probe the second page before performing the first
1720 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1721 memop
| MO_ALIGN_16
);
1722 addr_tmp
= tcg_temp_new();
1723 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1724 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
1727 g_assert_not_reached();
1732 /* Valid for stdfa on aligned registers only. */
1733 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
1734 /* The first operation checks required alignment. */
1735 addr_tmp
= tcg_temp_new();
1736 for (int i
= 0; ; ++i
) {
1737 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
1738 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
1742 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1746 gen_exception(dc
, TT_ILL_INSN
);
1751 /* Valid for stdfa only. */
1752 if (orig_size
== MO_64
) {
1753 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1756 gen_exception(dc
, TT_ILL_INSN
);
1761 /* According to the table in the UA2011 manual, the only
1762 other asis that are valid for ldfa/lddfa/ldqfa are
1763 the PST* asis, which aren't currently handled. */
1764 gen_exception(dc
, TT_ILL_INSN
);
1769 static void gen_ldda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
1771 TCGv hi
= gen_dest_gpr(dc
, rd
);
1772 TCGv lo
= gen_dest_gpr(dc
, rd
+ 1);
1778 case GET_ASI_DTWINX
:
1779 #ifdef TARGET_SPARC64
1781 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
1782 TCGv_i128 t
= tcg_temp_new_i128();
1784 tcg_gen_qemu_ld_i128(t
, addr
, da
->mem_idx
, mop
);
1786 * Note that LE twinx acts as if each 64-bit register result is
1787 * byte swapped. We perform one 128-bit LE load, so must swap
1788 * the order of the writebacks.
1790 if ((mop
& MO_BSWAP
) == MO_TE
) {
1791 tcg_gen_extr_i128_i64(lo
, hi
, t
);
1793 tcg_gen_extr_i128_i64(hi
, lo
, t
);
1798 g_assert_not_reached();
1801 case GET_ASI_DIRECT
:
1803 TCGv_i64 tmp
= tcg_temp_new_i64();
1805 tcg_gen_qemu_ld_i64(tmp
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1807 /* Note that LE ldda acts as if each 32-bit register
1808 result is byte swapped. Having just performed one
1809 64-bit bswap, we need now to swap the writebacks. */
1810 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
1811 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
1813 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
1819 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1821 MemOpIdx oi
= make_memop_idx(da
->memop
, da
->mem_idx
);
1822 TCGv_i64 tmp
= tcg_temp_new_i64();
1824 gen_helper_ld_code(tmp
, tcg_env
, addr
, tcg_constant_i32(oi
));
1827 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
1828 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
1830 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
1835 g_assert_not_reached();
1839 /* ??? In theory we've handled all of the ASIs that are valid
1840 for ldda, and this should raise DAE_invalid_asi. However,
1841 real hardware allows others. This can be seen with e.g.
1842 FreeBSD 10.3 wrt ASI_IC_TAG. */
1844 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1845 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
1846 TCGv_i64 tmp
= tcg_temp_new_i64();
1849 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
1852 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
1853 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
1855 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
1861 gen_store_gpr(dc
, rd
, hi
);
1862 gen_store_gpr(dc
, rd
+ 1, lo
);
1865 static void gen_stda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
1867 TCGv hi
= gen_load_gpr(dc
, rd
);
1868 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
1874 case GET_ASI_DTWINX
:
1875 #ifdef TARGET_SPARC64
1877 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
1878 TCGv_i128 t
= tcg_temp_new_i128();
1881 * Note that LE twinx acts as if each 64-bit register result is
1882 * byte swapped. We perform one 128-bit LE store, so must swap
1883 * the order of the construction.
1885 if ((mop
& MO_BSWAP
) == MO_TE
) {
1886 tcg_gen_concat_i64_i128(t
, lo
, hi
);
1888 tcg_gen_concat_i64_i128(t
, hi
, lo
);
1890 tcg_gen_qemu_st_i128(t
, addr
, da
->mem_idx
, mop
);
1894 g_assert_not_reached();
1897 case GET_ASI_DIRECT
:
1899 TCGv_i64 t64
= tcg_temp_new_i64();
1901 /* Note that LE stda acts as if each 32-bit register result is
1902 byte swapped. We will perform one 64-bit LE store, so now
1903 we must swap the order of the construction. */
1904 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
1905 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
1907 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
1909 tcg_gen_qemu_st_i64(t64
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1914 assert(TARGET_LONG_BITS
== 32);
1916 * Store 32 bytes of [rd:rd+1] to ADDR.
1917 * See comments for GET_ASI_COPY above.
1920 MemOp mop
= MO_TE
| MO_128
| MO_ATOM_IFALIGN_PAIR
;
1921 TCGv_i64 t8
= tcg_temp_new_i64();
1922 TCGv_i128 t16
= tcg_temp_new_i128();
1923 TCGv daddr
= tcg_temp_new();
1925 tcg_gen_concat_tl_i64(t8
, lo
, hi
);
1926 tcg_gen_concat_i64_i128(t16
, t8
, t8
);
1927 tcg_gen_andi_tl(daddr
, addr
, -32);
1928 tcg_gen_qemu_st_i128(t16
, daddr
, da
->mem_idx
, mop
);
1929 tcg_gen_addi_tl(daddr
, daddr
, 16);
1930 tcg_gen_qemu_st_i128(t16
, daddr
, da
->mem_idx
, mop
);
1935 /* ??? In theory we've handled all of the ASIs that are valid
1936 for stda, and this should raise DAE_invalid_asi. */
1938 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1939 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
1940 TCGv_i64 t64
= tcg_temp_new_i64();
1943 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
1944 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
1946 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
1950 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
1956 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
1958 #ifdef TARGET_SPARC64
1959 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
1960 TCGv_i64 c64
= tcg_temp_new_i64();
1962 /* We have two choices here: extend the 32 bit data and use movcond_i64,
1963 or fold the comparison down to 32 bits and use movcond_i32. Choose
1965 c32
= tcg_temp_new_i32();
1966 tcg_gen_setcondi_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
1967 tcg_gen_extrl_i64_i32(c32
, c64
);
1969 s1
= gen_load_fpr_F(dc
, rs
);
1970 s2
= gen_load_fpr_F(dc
, rd
);
1971 dst
= tcg_temp_new_i32();
1972 zero
= tcg_constant_i32(0);
1974 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
1976 gen_store_fpr_F(dc
, rd
, dst
);
1978 qemu_build_not_reached();
1982 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
1984 #ifdef TARGET_SPARC64
1985 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
1986 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, tcg_constant_tl(cmp
->c2
),
1987 gen_load_fpr_D(dc
, rs
),
1988 gen_load_fpr_D(dc
, rd
));
1989 gen_store_fpr_D(dc
, rd
, dst
);
1991 qemu_build_not_reached();
1995 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
1997 #ifdef TARGET_SPARC64
1998 int qd
= QFPREG(rd
);
1999 int qs
= QFPREG(rs
);
2000 TCGv c2
= tcg_constant_tl(cmp
->c2
);
2002 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, c2
,
2003 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2004 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, c2
,
2005 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2007 gen_update_fprs_dirty(dc
, qd
);
2009 qemu_build_not_reached();
2013 #ifdef TARGET_SPARC64
2014 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2016 TCGv_i32 r_tl
= tcg_temp_new_i32();
2018 /* load env->tl into r_tl */
2019 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2021 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2022 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2024 /* calculate offset to current trap state from env->ts, reuse r_tl */
2025 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2026 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2028 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2030 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2031 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2032 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2037 static int extract_dfpreg(DisasContext
*dc
, int x
)
2042 static int extract_qfpreg(DisasContext
*dc
, int x
)
2047 /* Include the auto-generated decoder. */
2048 #include "decode-insns.c.inc"
2050 #define TRANS(NAME, AVAIL, FUNC, ...) \
2051 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2052 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2054 #define avail_ALL(C) true
2055 #ifdef TARGET_SPARC64
2056 # define avail_32(C) false
2057 # define avail_ASR17(C) false
2058 # define avail_CASA(C) true
2059 # define avail_DIV(C) true
2060 # define avail_MUL(C) true
2061 # define avail_POWERDOWN(C) false
2062 # define avail_64(C) true
2063 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2064 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2065 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2066 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2068 # define avail_32(C) true
2069 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2070 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2071 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2072 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2073 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2074 # define avail_64(C) false
2075 # define avail_GL(C) false
2076 # define avail_HYPV(C) false
2077 # define avail_VIS1(C) false
2078 # define avail_VIS2(C) false
2081 /* Default case for non jump instructions. */
2082 static bool advance_pc(DisasContext
*dc
)
2091 case DYNAMIC_PC_LOOKUP
:
2093 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2094 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2098 /* we can do a static jump */
2099 l1
= gen_new_label();
2100 tcg_gen_brcondi_tl(dc
->jump
.cond
, dc
->jump
.c1
, dc
->jump
.c2
, l1
);
2102 /* jump not taken */
2103 gen_goto_tb(dc
, 1, dc
->jump_pc
[1], dc
->jump_pc
[1] + 4);
2107 gen_goto_tb(dc
, 0, dc
->jump_pc
[0], dc
->jump_pc
[0] + 4);
2109 dc
->base
.is_jmp
= DISAS_NORETURN
;
2113 g_assert_not_reached();
2117 dc
->npc
= dc
->npc
+ 4;
2123 * Major opcodes 00 and 01 -- branches, call, and sethi
2126 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2127 bool annul
, int disp
)
2129 target_ulong dest
= address_mask_i(dc
, dc
->pc
+ disp
* 4);
2134 if (cmp
->cond
== TCG_COND_ALWAYS
) {
2145 if (cmp
->cond
== TCG_COND_NEVER
) {
2150 tcg_gen_addi_tl(cpu_pc
, cpu_pc
, 4);
2152 tcg_gen_addi_tl(cpu_npc
, cpu_pc
, 4);
2154 dc
->pc
= npc
+ (annul
? 4 : 0);
2155 dc
->npc
= dc
->pc
+ 4;
2164 TCGLabel
*l1
= gen_new_label();
2166 tcg_gen_brcondi_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2167 gen_goto_tb(dc
, 0, npc
, dest
);
2169 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2171 dc
->base
.is_jmp
= DISAS_NORETURN
;
2176 case DYNAMIC_PC_LOOKUP
:
2177 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2178 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2179 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2180 cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2181 tcg_constant_tl(dest
), cpu_npc
);
2185 g_assert_not_reached();
2191 dc
->jump_pc
[0] = dest
;
2192 dc
->jump_pc
[1] = npc
+ 4;
2194 /* The condition for cpu_cond is always NE -- normalize. */
2195 if (cmp
->cond
== TCG_COND_NE
) {
2196 tcg_gen_xori_tl(cpu_cond
, cmp
->c1
, cmp
->c2
);
2198 tcg_gen_setcondi_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
2200 dc
->cpu_cond_live
= true;
2206 static bool raise_priv(DisasContext
*dc
)
2208 gen_exception(dc
, TT_PRIV_INSN
);
2212 static bool raise_unimpfpop(DisasContext
*dc
)
2214 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
2218 static bool gen_trap_float128(DisasContext
*dc
)
2220 if (dc
->def
->features
& CPU_FEATURE_FLOAT128
) {
2223 return raise_unimpfpop(dc
);
2226 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
2230 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
2231 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2234 TRANS(Bicc
, ALL
, do_bpcc
, a
)
2235 TRANS(BPcc
, 64, do_bpcc
, a
)
2237 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
2241 if (gen_trap_ifnofpu(dc
)) {
2244 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
2245 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2248 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
2249 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
2251 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
2255 if (!avail_64(dc
)) {
2258 if (!gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
))) {
2261 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2264 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
2266 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2268 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
2274 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
2277 * For sparc32, always generate the no-coprocessor exception.
2278 * For sparc64, always generate illegal instruction.
2280 #ifdef TARGET_SPARC64
2283 gen_exception(dc
, TT_NCP_INSN
);
2288 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
2290 /* Special-case %g0 because that's the canonical nop. */
2292 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
2294 return advance_pc(dc
);
2298 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2301 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
2302 int rs1
, bool imm
, int rs2_or_imm
)
2304 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2305 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2312 return advance_pc(dc
);
2316 * Immediate traps are the most common case. Since this value is
2317 * live across the branch, it really pays to evaluate the constant.
2319 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
2320 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
2322 trap
= tcg_temp_new_i32();
2323 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
2325 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
2327 TCGv_i32 t2
= tcg_temp_new_i32();
2328 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
2329 tcg_gen_add_i32(trap
, trap
, t2
);
2331 tcg_gen_andi_i32(trap
, trap
, mask
);
2332 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2340 gen_helper_raise_exception(tcg_env
, trap
);
2341 dc
->base
.is_jmp
= DISAS_NORETURN
;
2345 /* Conditional trap. */
2347 lab
= delay_exceptionv(dc
, trap
);
2348 gen_compare(&cmp
, cc
, cond
, dc
);
2349 tcg_gen_brcondi_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
2351 return advance_pc(dc
);
2354 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
2356 if (avail_32(dc
) && a
->cc
) {
2359 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
2362 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
2367 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
2370 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
2375 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
2378 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
2380 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2381 return advance_pc(dc
);
2384 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
2390 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2391 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
2394 /* For #Sync, etc, end the TB to recognize interrupts. */
2395 dc
->base
.is_jmp
= DISAS_EXIT
;
2397 return advance_pc(dc
);
2400 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
2401 TCGv (*func
)(DisasContext
*, TCGv
))
2404 return raise_priv(dc
);
2406 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
2407 return advance_pc(dc
);
2410 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
2415 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
2418 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2419 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2420 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2422 if (avail_64(dc
) && a
->rs1
!= 0) {
2425 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
2428 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
2430 gen_helper_rdasr17(dst
, tcg_env
);
2434 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
2436 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
2438 gen_helper_rdccr(dst
, tcg_env
);
2442 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
2444 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
2446 #ifdef TARGET_SPARC64
2447 return tcg_constant_tl(dc
->asi
);
2449 qemu_build_not_reached();
2453 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
2455 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
2457 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2459 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2460 if (translator_io_start(&dc
->base
)) {
2461 dc
->base
.is_jmp
= DISAS_EXIT
;
2463 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2464 tcg_constant_i32(dc
->mem_idx
));
2468 /* TODO: non-priv access only allowed when enabled. */
2469 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
2471 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
2473 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
2476 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
2478 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
2480 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
2484 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
2486 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
2488 gen_trap_ifnofpu(dc
);
2492 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
2494 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
2496 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
2500 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
2502 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
2504 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
2508 /* TODO: non-priv access only allowed when enabled. */
2509 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
2511 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
2513 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2515 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
2516 if (translator_io_start(&dc
->base
)) {
2517 dc
->base
.is_jmp
= DISAS_EXIT
;
2519 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2520 tcg_constant_i32(dc
->mem_idx
));
2524 /* TODO: non-priv access only allowed when enabled. */
2525 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
2527 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
2529 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
2533 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2534 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
2537 * UltraSPARC-T1 Strand status.
2538 * HYPV check maybe not enough, UA2005 & UA2007 describe
2539 * this ASR as impl. dep
2541 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
2543 return tcg_constant_tl(1);
2546 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
2548 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
2550 gen_helper_rdpsr(dst
, tcg_env
);
2554 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
2556 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
2558 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
2562 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
2564 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
2566 TCGv_i32 tl
= tcg_temp_new_i32();
2567 TCGv_ptr tp
= tcg_temp_new_ptr();
2569 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
2570 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
2571 tcg_gen_shli_i32(tl
, tl
, 3);
2572 tcg_gen_ext_i32_ptr(tp
, tl
);
2573 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
2575 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
2579 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
2581 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
2583 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
2587 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
2589 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
2591 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
2595 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
2597 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
2599 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
2603 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
2605 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
2607 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
2611 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
2614 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
2616 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
2620 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
2622 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
2624 #ifdef TARGET_SPARC64
2625 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2627 gen_load_trap_state_at_tl(r_tsptr
);
2628 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
2631 qemu_build_not_reached();
2635 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
2637 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
2639 #ifdef TARGET_SPARC64
2640 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2642 gen_load_trap_state_at_tl(r_tsptr
);
2643 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
2646 qemu_build_not_reached();
2650 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
2652 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
2654 #ifdef TARGET_SPARC64
2655 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2657 gen_load_trap_state_at_tl(r_tsptr
);
2658 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
2661 qemu_build_not_reached();
2665 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
2667 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
2669 #ifdef TARGET_SPARC64
2670 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2672 gen_load_trap_state_at_tl(r_tsptr
);
2673 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
2676 qemu_build_not_reached();
2680 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
2681 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
2683 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
2688 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
2689 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
2691 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
2693 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
2697 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
2699 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
2701 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
2705 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
2707 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
2709 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
2713 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
2715 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
2717 gen_helper_rdcwp(dst
, tcg_env
);
2721 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
2723 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
2725 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
2729 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
2731 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
2733 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
2737 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
2740 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
2742 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
2746 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
2748 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
2750 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
2754 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
2756 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
2758 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
2762 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
2764 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
2766 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
2770 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
2772 /* UA2005 strand status */
2773 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
2775 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
2779 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
2781 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
2783 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
2787 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
2789 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
2792 gen_helper_flushw(tcg_env
);
2793 return advance_pc(dc
);
2798 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
2799 void (*func
)(DisasContext
*, TCGv
))
2803 /* For simplicity, we under-decoded the rs2 form. */
2804 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
2808 return raise_priv(dc
);
2811 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
2812 src
= tcg_constant_tl(a
->rs2_or_imm
);
2814 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
2815 if (a
->rs2_or_imm
== 0) {
2818 src
= tcg_temp_new();
2820 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
2822 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
2827 return advance_pc(dc
);
2830 static void do_wry(DisasContext
*dc
, TCGv src
)
2832 tcg_gen_ext32u_tl(cpu_y
, src
);
2835 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
2837 static void do_wrccr(DisasContext
*dc
, TCGv src
)
2839 gen_helper_wrccr(tcg_env
, src
);
2842 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
2844 static void do_wrasi(DisasContext
*dc
, TCGv src
)
2846 TCGv tmp
= tcg_temp_new();
2848 tcg_gen_ext8u_tl(tmp
, src
);
2849 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
2850 /* End TB to notice changed ASI. */
2851 dc
->base
.is_jmp
= DISAS_EXIT
;
2854 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
2856 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
2858 #ifdef TARGET_SPARC64
2859 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
2861 dc
->base
.is_jmp
= DISAS_EXIT
;
2863 qemu_build_not_reached();
2867 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
2869 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
2871 gen_trap_ifnofpu(dc
);
2872 tcg_gen_mov_tl(cpu_gsr
, src
);
2875 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
2877 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
2879 gen_helper_set_softint(tcg_env
, src
);
2882 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
2884 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
2886 gen_helper_clear_softint(tcg_env
, src
);
2889 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
2891 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
2893 gen_helper_write_softint(tcg_env
, src
);
2896 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
2898 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
2900 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2902 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
2903 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2904 translator_io_start(&dc
->base
);
2905 gen_helper_tick_set_limit(r_tickptr
, src
);
2906 /* End TB to handle timer interrupt */
2907 dc
->base
.is_jmp
= DISAS_EXIT
;
2910 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
2912 static void do_wrstick(DisasContext
*dc
, TCGv src
)
2914 #ifdef TARGET_SPARC64
2915 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2917 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
2918 translator_io_start(&dc
->base
);
2919 gen_helper_tick_set_count(r_tickptr
, src
);
2920 /* End TB to handle timer interrupt */
2921 dc
->base
.is_jmp
= DISAS_EXIT
;
2923 qemu_build_not_reached();
2927 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
2929 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
2931 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2933 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
2934 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
2935 translator_io_start(&dc
->base
);
2936 gen_helper_tick_set_limit(r_tickptr
, src
);
2937 /* End TB to handle timer interrupt */
2938 dc
->base
.is_jmp
= DISAS_EXIT
;
2941 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
2943 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
2947 gen_helper_power_down(tcg_env
);
2950 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
2952 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
2954 gen_helper_wrpsr(tcg_env
, src
);
2955 dc
->base
.is_jmp
= DISAS_EXIT
;
2958 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
2960 static void do_wrwim(DisasContext
*dc
, TCGv src
)
2962 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
2963 TCGv tmp
= tcg_temp_new();
2965 tcg_gen_andi_tl(tmp
, src
, mask
);
2966 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
2969 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
2971 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
2973 #ifdef TARGET_SPARC64
2974 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2976 gen_load_trap_state_at_tl(r_tsptr
);
2977 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
2979 qemu_build_not_reached();
2983 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
2985 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
2987 #ifdef TARGET_SPARC64
2988 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2990 gen_load_trap_state_at_tl(r_tsptr
);
2991 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
2993 qemu_build_not_reached();
2997 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
2999 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3001 #ifdef TARGET_SPARC64
3002 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3004 gen_load_trap_state_at_tl(r_tsptr
);
3005 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3007 qemu_build_not_reached();
3011 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3013 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3015 #ifdef TARGET_SPARC64
3016 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3018 gen_load_trap_state_at_tl(r_tsptr
);
3019 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3021 qemu_build_not_reached();
3025 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3027 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3029 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3031 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3032 translator_io_start(&dc
->base
);
3033 gen_helper_tick_set_count(r_tickptr
, src
);
3034 /* End TB to handle timer interrupt */
3035 dc
->base
.is_jmp
= DISAS_EXIT
;
3038 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3040 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3042 tcg_gen_mov_tl(cpu_tbr
, src
);
3045 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3047 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3050 if (translator_io_start(&dc
->base
)) {
3051 dc
->base
.is_jmp
= DISAS_EXIT
;
3053 gen_helper_wrpstate(tcg_env
, src
);
3054 dc
->npc
= DYNAMIC_PC
;
3057 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3059 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3062 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3063 dc
->npc
= DYNAMIC_PC
;
3066 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3068 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3070 if (translator_io_start(&dc
->base
)) {
3071 dc
->base
.is_jmp
= DISAS_EXIT
;
3073 gen_helper_wrpil(tcg_env
, src
);
3076 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3078 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3080 gen_helper_wrcwp(tcg_env
, src
);
3083 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3085 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3087 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3090 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3092 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3094 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3097 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3099 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3101 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3104 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3106 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3108 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3111 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3113 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3115 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3118 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
3120 static void do_wrgl(DisasContext
*dc
, TCGv src
)
3122 gen_helper_wrgl(tcg_env
, src
);
3125 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
3127 /* UA2005 strand status */
3128 static void do_wrssr(DisasContext
*dc
, TCGv src
)
3130 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
3133 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
3135 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3137 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
3139 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
3140 dc
->base
.is_jmp
= DISAS_EXIT
;
3143 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
3145 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
3147 TCGv_i32 tl
= tcg_temp_new_i32();
3148 TCGv_ptr tp
= tcg_temp_new_ptr();
3150 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3151 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3152 tcg_gen_shli_i32(tl
, tl
, 3);
3153 tcg_gen_ext_i32_ptr(tp
, tl
);
3154 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3156 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
3159 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
3161 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
3163 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
3166 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
3168 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
3170 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
3173 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
3175 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
3177 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3179 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3180 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
3181 translator_io_start(&dc
->base
);
3182 gen_helper_tick_set_limit(r_tickptr
, src
);
3183 /* End TB to handle timer interrupt */
3184 dc
->base
.is_jmp
= DISAS_EXIT
;
3187 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
3190 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
3192 if (!supervisor(dc
)) {
3193 return raise_priv(dc
);
3196 gen_helper_saved(tcg_env
);
3198 gen_helper_restored(tcg_env
);
3200 return advance_pc(dc
);
3203 TRANS(SAVED
, 64, do_saved_restored
, true)
3204 TRANS(RESTORED
, 64, do_saved_restored
, false)
3206 static bool trans_NOP(DisasContext
*dc
, arg_NOP
*a
)
3208 return advance_pc(dc
);
3212 * TODO: Need a feature bit for sparcv8.
3213 * In the meantime, treat all 32-bit cpus like sparcv7.
3215 TRANS(NOP_v7
, 32, trans_NOP
, a
)
3216 TRANS(NOP_v9
, 64, trans_NOP
, a
)
3218 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3219 void (*func
)(TCGv
, TCGv
, TCGv
),
3220 void (*funci
)(TCGv
, TCGv
, target_long
),
3225 /* For simplicity, we under-decoded the rs2 form. */
3226 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3233 dst
= gen_dest_gpr(dc
, a
->rd
);
3235 src1
= gen_load_gpr(dc
, a
->rs1
);
3237 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3239 funci(dst
, src1
, a
->rs2_or_imm
);
3241 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
3244 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
3248 if (TARGET_LONG_BITS
== 64) {
3249 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
3250 tcg_gen_movi_tl(cpu_icc_C
, 0);
3252 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
3253 tcg_gen_movi_tl(cpu_cc_C
, 0);
3254 tcg_gen_movi_tl(cpu_cc_V
, 0);
3257 gen_store_gpr(dc
, a
->rd
, dst
);
3258 return advance_pc(dc
);
3261 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3262 void (*func
)(TCGv
, TCGv
, TCGv
),
3263 void (*funci
)(TCGv
, TCGv
, target_long
),
3264 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
3267 return do_arith_int(dc
, a
, func_cc
, NULL
, false);
3269 return do_arith_int(dc
, a
, func
, funci
, false);
3272 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3273 void (*func
)(TCGv
, TCGv
, TCGv
),
3274 void (*funci
)(TCGv
, TCGv
, target_long
))
3276 return do_arith_int(dc
, a
, func
, funci
, a
->cc
);
3279 TRANS(ADD
, ALL
, do_arith
, a
, tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_addcc
)
3280 TRANS(SUB
, ALL
, do_arith
, a
, tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_subcc
)
3281 TRANS(ADDC
, ALL
, do_arith
, a
, gen_op_addc
, NULL
, gen_op_addccc
)
3282 TRANS(SUBC
, ALL
, do_arith
, a
, gen_op_subc
, NULL
, gen_op_subccc
)
3284 TRANS(TADDcc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_taddcc
)
3285 TRANS(TSUBcc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_tsubcc
)
3286 TRANS(TADDccTV
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_taddcctv
)
3287 TRANS(TSUBccTV
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_tsubcctv
)
3289 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
3290 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
3291 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
3292 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
3293 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
3295 TRANS(MULX
, 64, do_arith
, a
, tcg_gen_mul_tl
, tcg_gen_muli_tl
, NULL
)
3296 TRANS(UMUL
, MUL
, do_logic
, a
, gen_op_umul
, NULL
)
3297 TRANS(SMUL
, MUL
, do_logic
, a
, gen_op_smul
, NULL
)
3298 TRANS(MULScc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_mulscc
)
3300 TRANS(UDIVcc
, DIV
, do_arith
, a
, NULL
, NULL
, gen_op_udivcc
)
3301 TRANS(SDIV
, DIV
, do_arith
, a
, gen_op_sdiv
, NULL
, gen_op_sdivcc
)
3303 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3304 TRANS(POPC
, 64, do_arith
, a
, gen_op_popc
, NULL
, NULL
)
3306 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3308 /* OR with %g0 is the canonical alias for MOV. */
3309 if (!a
->cc
&& a
->rs1
== 0) {
3310 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3311 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
3312 } else if (a
->rs2_or_imm
& ~0x1f) {
3313 /* For simplicity, we under-decoded the rs2 form. */
3316 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
3318 return advance_pc(dc
);
3320 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
3323 static bool trans_UDIV(DisasContext
*dc
, arg_r_r_ri
*a
)
3328 if (!avail_DIV(dc
)) {
3331 /* For simplicity, we under-decoded the rs2 form. */
3332 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3336 if (unlikely(a
->rs2_or_imm
== 0)) {
3337 gen_exception(dc
, TT_DIV_ZERO
);
3342 t2
= tcg_constant_i64((uint32_t)a
->rs2_or_imm
);
3350 n2
= tcg_temp_new_i32();
3351 tcg_gen_trunc_tl_i32(n2
, cpu_regs
[a
->rs2_or_imm
]);
3353 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3354 tcg_gen_brcondi_i32(TCG_COND_EQ
, n2
, 0, lab
);
3356 t2
= tcg_temp_new_i64();
3357 #ifdef TARGET_SPARC64
3358 tcg_gen_ext32u_i64(t2
, cpu_regs
[a
->rs2_or_imm
]);
3360 tcg_gen_extu_i32_i64(t2
, cpu_regs
[a
->rs2_or_imm
]);
3364 t1
= tcg_temp_new_i64();
3365 tcg_gen_concat_tl_i64(t1
, gen_load_gpr(dc
, a
->rs1
), cpu_y
);
3367 tcg_gen_divu_i64(t1
, t1
, t2
);
3368 tcg_gen_umin_i64(t1
, t1
, tcg_constant_i64(UINT32_MAX
));
3370 dst
= gen_dest_gpr(dc
, a
->rd
);
3371 tcg_gen_trunc_i64_tl(dst
, t1
);
3372 gen_store_gpr(dc
, a
->rd
, dst
);
3373 return advance_pc(dc
);
3376 static bool trans_UDIVX(DisasContext
*dc
, arg_r_r_ri
*a
)
3378 TCGv dst
, src1
, src2
;
3380 if (!avail_64(dc
)) {
3383 /* For simplicity, we under-decoded the rs2 form. */
3384 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3388 if (unlikely(a
->rs2_or_imm
== 0)) {
3389 gen_exception(dc
, TT_DIV_ZERO
);
3394 src2
= tcg_constant_tl(a
->rs2_or_imm
);
3401 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3402 src2
= cpu_regs
[a
->rs2_or_imm
];
3403 tcg_gen_brcondi_tl(TCG_COND_EQ
, src2
, 0, lab
);
3406 dst
= gen_dest_gpr(dc
, a
->rd
);
3407 src1
= gen_load_gpr(dc
, a
->rs1
);
3409 tcg_gen_divu_tl(dst
, src1
, src2
);
3410 gen_store_gpr(dc
, a
->rd
, dst
);
3411 return advance_pc(dc
);
3414 static bool trans_SDIVX(DisasContext
*dc
, arg_r_r_ri
*a
)
3416 TCGv dst
, src1
, src2
;
3418 if (!avail_64(dc
)) {
3421 /* For simplicity, we under-decoded the rs2 form. */
3422 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3426 if (unlikely(a
->rs2_or_imm
== 0)) {
3427 gen_exception(dc
, TT_DIV_ZERO
);
3431 dst
= gen_dest_gpr(dc
, a
->rd
);
3432 src1
= gen_load_gpr(dc
, a
->rs1
);
3435 if (unlikely(a
->rs2_or_imm
== -1)) {
3436 tcg_gen_neg_tl(dst
, src1
);
3437 gen_store_gpr(dc
, a
->rd
, dst
);
3438 return advance_pc(dc
);
3440 src2
= tcg_constant_tl(a
->rs2_or_imm
);
3448 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3449 src2
= cpu_regs
[a
->rs2_or_imm
];
3450 tcg_gen_brcondi_tl(TCG_COND_EQ
, src2
, 0, lab
);
3453 * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3454 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3456 t1
= tcg_temp_new();
3457 t2
= tcg_temp_new();
3458 tcg_gen_setcondi_tl(TCG_COND_EQ
, t1
, src1
, (target_long
)INT64_MIN
);
3459 tcg_gen_setcondi_tl(TCG_COND_EQ
, t2
, src2
, -1);
3460 tcg_gen_and_tl(t1
, t1
, t2
);
3461 tcg_gen_movcond_tl(TCG_COND_NE
, t1
, t1
, tcg_constant_tl(0),
3462 tcg_constant_tl(1), src2
);
3466 tcg_gen_div_tl(dst
, src1
, src2
);
3467 gen_store_gpr(dc
, a
->rd
, dst
);
3468 return advance_pc(dc
);
3471 static bool gen_edge(DisasContext
*dc
, arg_r_r_r
*a
,
3472 int width
, bool cc
, bool left
)
3474 TCGv dst
, s1
, s2
, lo1
, lo2
;
3475 uint64_t amask
, tabl
, tabr
;
3476 int shift
, imask
, omask
;
3478 dst
= gen_dest_gpr(dc
, a
->rd
);
3479 s1
= gen_load_gpr(dc
, a
->rs1
);
3480 s2
= gen_load_gpr(dc
, a
->rs2
);
3483 gen_op_subcc(cpu_cc_N
, s1
, s2
);
3487 * Theory of operation: there are two tables, left and right (not to
3488 * be confused with the left and right versions of the opcode). These
3489 * are indexed by the low 3 bits of the inputs. To make things "easy",
3490 * these tables are loaded into two constants, TABL and TABR below.
3491 * The operation index = (input & imask) << shift calculates the index
3492 * into the constant, while val = (table >> index) & omask calculates
3493 * the value we're looking for.
3501 tabl
= 0x80c0e0f0f8fcfeffULL
;
3502 tabr
= 0xff7f3f1f0f070301ULL
;
3504 tabl
= 0x0103070f1f3f7fffULL
;
3505 tabr
= 0xfffefcf8f0e0c080ULL
;
3525 tabl
= (2 << 2) | 3;
3526 tabr
= (3 << 2) | 1;
3528 tabl
= (1 << 2) | 3;
3529 tabr
= (3 << 2) | 2;
3536 lo1
= tcg_temp_new();
3537 lo2
= tcg_temp_new();
3538 tcg_gen_andi_tl(lo1
, s1
, imask
);
3539 tcg_gen_andi_tl(lo2
, s2
, imask
);
3540 tcg_gen_shli_tl(lo1
, lo1
, shift
);
3541 tcg_gen_shli_tl(lo2
, lo2
, shift
);
3543 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
3544 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
3545 tcg_gen_andi_tl(lo1
, lo1
, omask
);
3546 tcg_gen_andi_tl(lo2
, lo2
, omask
);
3548 amask
= address_mask_i(dc
, -8);
3549 tcg_gen_andi_tl(s1
, s1
, amask
);
3550 tcg_gen_andi_tl(s2
, s2
, amask
);
3552 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3553 tcg_gen_and_tl(lo2
, lo2
, lo1
);
3554 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
3556 gen_store_gpr(dc
, a
->rd
, dst
);
3557 return advance_pc(dc
);
3560 TRANS(EDGE8cc
, VIS1
, gen_edge
, a
, 8, 1, 0)
3561 TRANS(EDGE8Lcc
, VIS1
, gen_edge
, a
, 8, 1, 1)
3562 TRANS(EDGE16cc
, VIS1
, gen_edge
, a
, 16, 1, 0)
3563 TRANS(EDGE16Lcc
, VIS1
, gen_edge
, a
, 16, 1, 1)
3564 TRANS(EDGE32cc
, VIS1
, gen_edge
, a
, 32, 1, 0)
3565 TRANS(EDGE32Lcc
, VIS1
, gen_edge
, a
, 32, 1, 1)
3567 TRANS(EDGE8N
, VIS2
, gen_edge
, a
, 8, 0, 0)
3568 TRANS(EDGE8LN
, VIS2
, gen_edge
, a
, 8, 0, 1)
3569 TRANS(EDGE16N
, VIS2
, gen_edge
, a
, 16, 0, 0)
3570 TRANS(EDGE16LN
, VIS2
, gen_edge
, a
, 16, 0, 1)
3571 TRANS(EDGE32N
, VIS2
, gen_edge
, a
, 32, 0, 0)
3572 TRANS(EDGE32LN
, VIS2
, gen_edge
, a
, 32, 0, 1)
3574 static bool do_rrr(DisasContext
*dc
, arg_r_r_r
*a
,
3575 void (*func
)(TCGv
, TCGv
, TCGv
))
3577 TCGv dst
= gen_dest_gpr(dc
, a
->rd
);
3578 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3579 TCGv src2
= gen_load_gpr(dc
, a
->rs2
);
3581 func(dst
, src1
, src2
);
3582 gen_store_gpr(dc
, a
->rd
, dst
);
3583 return advance_pc(dc
);
3586 TRANS(ARRAY8
, VIS1
, do_rrr
, a
, gen_helper_array8
)
3587 TRANS(ARRAY16
, VIS1
, do_rrr
, a
, gen_op_array16
)
3588 TRANS(ARRAY32
, VIS1
, do_rrr
, a
, gen_op_array32
)
3590 static void gen_op_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
)
3592 #ifdef TARGET_SPARC64
3593 TCGv tmp
= tcg_temp_new();
3595 tcg_gen_add_tl(tmp
, s1
, s2
);
3596 tcg_gen_andi_tl(dst
, tmp
, -8);
3597 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
3599 g_assert_not_reached();
3603 static void gen_op_alignaddrl(TCGv dst
, TCGv s1
, TCGv s2
)
3605 #ifdef TARGET_SPARC64
3606 TCGv tmp
= tcg_temp_new();
3608 tcg_gen_add_tl(tmp
, s1
, s2
);
3609 tcg_gen_andi_tl(dst
, tmp
, -8);
3610 tcg_gen_neg_tl(tmp
, tmp
);
3611 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
3613 g_assert_not_reached();
3617 TRANS(ALIGNADDR
, VIS1
, do_rrr
, a
, gen_op_alignaddr
)
3618 TRANS(ALIGNADDRL
, VIS1
, do_rrr
, a
, gen_op_alignaddrl
)
3620 static void gen_op_bmask(TCGv dst
, TCGv s1
, TCGv s2
)
3622 #ifdef TARGET_SPARC64
3623 tcg_gen_add_tl(dst
, s1
, s2
);
3624 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, dst
, 32, 32);
3626 g_assert_not_reached();
3630 TRANS(BMASK
, VIS2
, do_rrr
, a
, gen_op_bmask
)
3632 static bool do_shift_r(DisasContext
*dc
, arg_shiftr
*a
, bool l
, bool u
)
3634 TCGv dst
, src1
, src2
;
3636 /* Reject 64-bit shifts for sparc32. */
3637 if (avail_32(dc
) && a
->x
) {
3641 src2
= tcg_temp_new();
3642 tcg_gen_andi_tl(src2
, gen_load_gpr(dc
, a
->rs2
), a
->x
? 63 : 31);
3643 src1
= gen_load_gpr(dc
, a
->rs1
);
3644 dst
= gen_dest_gpr(dc
, a
->rd
);
3647 tcg_gen_shl_tl(dst
, src1
, src2
);
3649 tcg_gen_ext32u_tl(dst
, dst
);
3653 tcg_gen_ext32u_tl(dst
, src1
);
3656 tcg_gen_shr_tl(dst
, src1
, src2
);
3659 tcg_gen_ext32s_tl(dst
, src1
);
3662 tcg_gen_sar_tl(dst
, src1
, src2
);
3664 gen_store_gpr(dc
, a
->rd
, dst
);
3665 return advance_pc(dc
);
3668 TRANS(SLL_r
, ALL
, do_shift_r
, a
, true, true)
3669 TRANS(SRL_r
, ALL
, do_shift_r
, a
, false, true)
3670 TRANS(SRA_r
, ALL
, do_shift_r
, a
, false, false)
3672 static bool do_shift_i(DisasContext
*dc
, arg_shifti
*a
, bool l
, bool u
)
3676 /* Reject 64-bit shifts for sparc32. */
3677 if (avail_32(dc
) && (a
->x
|| a
->i
>= 32)) {
3681 src1
= gen_load_gpr(dc
, a
->rs1
);
3682 dst
= gen_dest_gpr(dc
, a
->rd
);
3684 if (avail_32(dc
) || a
->x
) {
3686 tcg_gen_shli_tl(dst
, src1
, a
->i
);
3688 tcg_gen_shri_tl(dst
, src1
, a
->i
);
3690 tcg_gen_sari_tl(dst
, src1
, a
->i
);
3694 tcg_gen_deposit_z_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3696 tcg_gen_extract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3698 tcg_gen_sextract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3701 gen_store_gpr(dc
, a
->rd
, dst
);
3702 return advance_pc(dc
);
3705 TRANS(SLL_i
, ALL
, do_shift_i
, a
, true, true)
3706 TRANS(SRL_i
, ALL
, do_shift_i
, a
, false, true)
3707 TRANS(SRA_i
, ALL
, do_shift_i
, a
, false, false)
3709 static TCGv
gen_rs2_or_imm(DisasContext
*dc
, bool imm
, int rs2_or_imm
)
3711 /* For simplicity, we under-decoded the rs2 form. */
3712 if (!imm
&& rs2_or_imm
& ~0x1f) {
3715 if (imm
|| rs2_or_imm
== 0) {
3716 return tcg_constant_tl(rs2_or_imm
);
3718 return cpu_regs
[rs2_or_imm
];
3722 static bool do_mov_cond(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, TCGv src2
)
3724 TCGv dst
= gen_load_gpr(dc
, rd
);
3725 TCGv c2
= tcg_constant_tl(cmp
->c2
);
3727 tcg_gen_movcond_tl(cmp
->cond
, dst
, cmp
->c1
, c2
, src2
, dst
);
3728 gen_store_gpr(dc
, rd
, dst
);
3729 return advance_pc(dc
);
3732 static bool trans_MOVcc(DisasContext
*dc
, arg_MOVcc
*a
)
3734 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
3740 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
3741 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
3744 static bool trans_MOVfcc(DisasContext
*dc
, arg_MOVfcc
*a
)
3746 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
3752 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
3753 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
3756 static bool trans_MOVR(DisasContext
*dc
, arg_MOVR
*a
)
3758 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
3764 if (!gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
))) {
3767 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
3770 static bool do_add_special(DisasContext
*dc
, arg_r_r_ri
*a
,
3771 bool (*func
)(DisasContext
*dc
, int rd
, TCGv src
))
3775 /* For simplicity, we under-decoded the rs2 form. */
3776 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3781 * Always load the sum into a new temporary.
3782 * This is required to capture the value across a window change,
3783 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
3785 sum
= tcg_temp_new();
3786 src1
= gen_load_gpr(dc
, a
->rs1
);
3787 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3788 tcg_gen_addi_tl(sum
, src1
, a
->rs2_or_imm
);
3790 tcg_gen_add_tl(sum
, src1
, cpu_regs
[a
->rs2_or_imm
]);
3792 return func(dc
, a
->rd
, sum
);
3795 static bool do_jmpl(DisasContext
*dc
, int rd
, TCGv src
)
3798 * Preserve pc across advance, so that we can delay
3799 * the writeback to rd until after src is consumed.
3801 target_ulong cur_pc
= dc
->pc
;
3803 gen_check_align(dc
, src
, 3);
3806 tcg_gen_mov_tl(cpu_npc
, src
);
3807 gen_address_mask(dc
, cpu_npc
);
3808 gen_store_gpr(dc
, rd
, tcg_constant_tl(cur_pc
));
3810 dc
->npc
= DYNAMIC_PC_LOOKUP
;
3814 TRANS(JMPL
, ALL
, do_add_special
, a
, do_jmpl
)
3816 static bool do_rett(DisasContext
*dc
, int rd
, TCGv src
)
3818 if (!supervisor(dc
)) {
3819 return raise_priv(dc
);
3822 gen_check_align(dc
, src
, 3);
3825 tcg_gen_mov_tl(cpu_npc
, src
);
3826 gen_helper_rett(tcg_env
);
3828 dc
->npc
= DYNAMIC_PC
;
3832 TRANS(RETT
, 32, do_add_special
, a
, do_rett
)
3834 static bool do_return(DisasContext
*dc
, int rd
, TCGv src
)
3836 gen_check_align(dc
, src
, 3);
3837 gen_helper_restore(tcg_env
);
3840 tcg_gen_mov_tl(cpu_npc
, src
);
3841 gen_address_mask(dc
, cpu_npc
);
3843 dc
->npc
= DYNAMIC_PC_LOOKUP
;
3847 TRANS(RETURN
, 64, do_add_special
, a
, do_return
)
3849 static bool do_save(DisasContext
*dc
, int rd
, TCGv src
)
3851 gen_helper_save(tcg_env
);
3852 gen_store_gpr(dc
, rd
, src
);
3853 return advance_pc(dc
);
3856 TRANS(SAVE
, ALL
, do_add_special
, a
, do_save
)
3858 static bool do_restore(DisasContext
*dc
, int rd
, TCGv src
)
3860 gen_helper_restore(tcg_env
);
3861 gen_store_gpr(dc
, rd
, src
);
3862 return advance_pc(dc
);
3865 TRANS(RESTORE
, ALL
, do_add_special
, a
, do_restore
)
3867 static bool do_done_retry(DisasContext
*dc
, bool done
)
3869 if (!supervisor(dc
)) {
3870 return raise_priv(dc
);
3872 dc
->npc
= DYNAMIC_PC
;
3873 dc
->pc
= DYNAMIC_PC
;
3874 translator_io_start(&dc
->base
);
3876 gen_helper_done(tcg_env
);
3878 gen_helper_retry(tcg_env
);
3883 TRANS(DONE
, 64, do_done_retry
, true)
3884 TRANS(RETRY
, 64, do_done_retry
, false)
3887 * Major opcode 11 -- load and store instructions
3890 static TCGv
gen_ldst_addr(DisasContext
*dc
, int rs1
, bool imm
, int rs2_or_imm
)
3892 TCGv addr
, tmp
= NULL
;
3894 /* For simplicity, we under-decoded the rs2 form. */
3895 if (!imm
&& rs2_or_imm
& ~0x1f) {
3899 addr
= gen_load_gpr(dc
, rs1
);
3901 tmp
= tcg_temp_new();
3903 tcg_gen_addi_tl(tmp
, addr
, rs2_or_imm
);
3905 tcg_gen_add_tl(tmp
, addr
, cpu_regs
[rs2_or_imm
]);
3911 tmp
= tcg_temp_new();
3913 tcg_gen_ext32u_tl(tmp
, addr
);
3919 static bool do_ld_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
3921 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
3927 da
= resolve_asi(dc
, a
->asi
, mop
);
3929 reg
= gen_dest_gpr(dc
, a
->rd
);
3930 gen_ld_asi(dc
, &da
, reg
, addr
);
3931 gen_store_gpr(dc
, a
->rd
, reg
);
3932 return advance_pc(dc
);
3935 TRANS(LDUW
, ALL
, do_ld_gpr
, a
, MO_TEUL
)
3936 TRANS(LDUB
, ALL
, do_ld_gpr
, a
, MO_UB
)
3937 TRANS(LDUH
, ALL
, do_ld_gpr
, a
, MO_TEUW
)
3938 TRANS(LDSB
, ALL
, do_ld_gpr
, a
, MO_SB
)
3939 TRANS(LDSH
, ALL
, do_ld_gpr
, a
, MO_TESW
)
3940 TRANS(LDSW
, 64, do_ld_gpr
, a
, MO_TESL
)
3941 TRANS(LDX
, 64, do_ld_gpr
, a
, MO_TEUQ
)
3943 static bool do_st_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
3945 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
3951 da
= resolve_asi(dc
, a
->asi
, mop
);
3953 reg
= gen_load_gpr(dc
, a
->rd
);
3954 gen_st_asi(dc
, &da
, reg
, addr
);
3955 return advance_pc(dc
);
3958 TRANS(STW
, ALL
, do_st_gpr
, a
, MO_TEUL
)
3959 TRANS(STB
, ALL
, do_st_gpr
, a
, MO_UB
)
3960 TRANS(STH
, ALL
, do_st_gpr
, a
, MO_TEUW
)
3961 TRANS(STX
, 64, do_st_gpr
, a
, MO_TEUQ
)
3963 static bool trans_LDD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
3971 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
3975 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
3976 gen_ldda_asi(dc
, &da
, addr
, a
->rd
);
3977 return advance_pc(dc
);
3980 static bool trans_STD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
3988 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
3992 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
3993 gen_stda_asi(dc
, &da
, addr
, a
->rd
);
3994 return advance_pc(dc
);
3997 static bool trans_LDSTUB(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4002 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4006 da
= resolve_asi(dc
, a
->asi
, MO_UB
);
4008 reg
= gen_dest_gpr(dc
, a
->rd
);
4009 gen_ldstub_asi(dc
, &da
, reg
, addr
);
4010 gen_store_gpr(dc
, a
->rd
, reg
);
4011 return advance_pc(dc
);
4014 static bool trans_SWAP(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4016 TCGv addr
, dst
, src
;
4019 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4023 da
= resolve_asi(dc
, a
->asi
, MO_TEUL
);
4025 dst
= gen_dest_gpr(dc
, a
->rd
);
4026 src
= gen_load_gpr(dc
, a
->rd
);
4027 gen_swap_asi(dc
, &da
, dst
, src
, addr
);
4028 gen_store_gpr(dc
, a
->rd
, dst
);
4029 return advance_pc(dc
);
4032 static bool do_casa(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4037 addr
= gen_ldst_addr(dc
, a
->rs1
, true, 0);
4041 da
= resolve_asi(dc
, a
->asi
, mop
);
4043 o
= gen_dest_gpr(dc
, a
->rd
);
4044 n
= gen_load_gpr(dc
, a
->rd
);
4045 c
= gen_load_gpr(dc
, a
->rs2_or_imm
);
4046 gen_cas_asi(dc
, &da
, o
, n
, c
, addr
);
4047 gen_store_gpr(dc
, a
->rd
, o
);
4048 return advance_pc(dc
);
4051 TRANS(CASA
, CASA
, do_casa
, a
, MO_TEUL
)
4052 TRANS(CASXA
, 64, do_casa
, a
, MO_TEUQ
)
4054 static bool do_ld_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4056 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4062 if (gen_trap_ifnofpu(dc
)) {
4065 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4068 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4069 gen_ldf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4070 gen_update_fprs_dirty(dc
, a
->rd
);
4071 return advance_pc(dc
);
4074 TRANS(LDF
, ALL
, do_ld_fpr
, a
, MO_32
)
4075 TRANS(LDDF
, ALL
, do_ld_fpr
, a
, MO_64
)
4076 TRANS(LDQF
, ALL
, do_ld_fpr
, a
, MO_128
)
4078 TRANS(LDFA
, 64, do_ld_fpr
, a
, MO_32
)
4079 TRANS(LDDFA
, 64, do_ld_fpr
, a
, MO_64
)
4080 TRANS(LDQFA
, 64, do_ld_fpr
, a
, MO_128
)
4082 static bool do_st_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4084 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4090 if (gen_trap_ifnofpu(dc
)) {
4093 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4096 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4097 gen_stf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4098 return advance_pc(dc
);
4101 TRANS(STF
, ALL
, do_st_fpr
, a
, MO_32
)
4102 TRANS(STDF
, ALL
, do_st_fpr
, a
, MO_64
)
4103 TRANS(STQF
, ALL
, do_st_fpr
, a
, MO_128
)
4105 TRANS(STFA
, 64, do_st_fpr
, a
, MO_32
)
4106 TRANS(STDFA
, 64, do_st_fpr
, a
, MO_64
)
4107 TRANS(STQFA
, 64, do_st_fpr
, a
, MO_128
)
4109 static bool trans_STDFQ(DisasContext
*dc
, arg_STDFQ
*a
)
4111 if (!avail_32(dc
)) {
4114 if (!supervisor(dc
)) {
4115 return raise_priv(dc
);
4117 if (gen_trap_ifnofpu(dc
)) {
4120 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
4124 static bool trans_LDFSR(DisasContext
*dc
, arg_r_r_ri
*a
)
4126 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4132 if (gen_trap_ifnofpu(dc
)) {
4136 tmp
= tcg_temp_new_i32();
4137 tcg_gen_qemu_ld_i32(tmp
, addr
, dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
4139 tcg_gen_extract_i32(cpu_fcc
[0], tmp
, FSR_FCC0_SHIFT
, 2);
4140 /* LDFSR does not change FCC[1-3]. */
4142 gen_helper_set_fsr_nofcc_noftt(tcg_env
, tmp
);
4143 return advance_pc(dc
);
4146 static bool trans_LDXFSR(DisasContext
*dc
, arg_r_r_ri
*a
)
4148 #ifdef TARGET_SPARC64
4149 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4156 if (gen_trap_ifnofpu(dc
)) {
4160 t64
= tcg_temp_new_i64();
4161 tcg_gen_qemu_ld_i64(t64
, addr
, dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
4163 lo
= tcg_temp_new_i32();
4165 tcg_gen_extr_i64_i32(lo
, hi
, t64
);
4166 tcg_gen_extract_i32(cpu_fcc
[0], lo
, FSR_FCC0_SHIFT
, 2);
4167 tcg_gen_extract_i32(cpu_fcc
[1], hi
, FSR_FCC1_SHIFT
- 32, 2);
4168 tcg_gen_extract_i32(cpu_fcc
[2], hi
, FSR_FCC2_SHIFT
- 32, 2);
4169 tcg_gen_extract_i32(cpu_fcc
[3], hi
, FSR_FCC3_SHIFT
- 32, 2);
4171 gen_helper_set_fsr_nofcc_noftt(tcg_env
, lo
);
4172 return advance_pc(dc
);
4178 static bool do_stfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
)
4180 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4186 if (gen_trap_ifnofpu(dc
)) {
4190 fsr
= tcg_temp_new();
4191 gen_helper_get_fsr(fsr
, tcg_env
);
4192 tcg_gen_qemu_st_tl(fsr
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4193 return advance_pc(dc
);
4196 TRANS(STFSR
, ALL
, do_stfsr
, a
, MO_TEUL
)
4197 TRANS(STXFSR
, 64, do_stfsr
, a
, MO_TEUQ
)
4199 static bool do_fc(DisasContext
*dc
, int rd
, bool c
)
4203 if (gen_trap_ifnofpu(dc
)) {
4208 mask
= MAKE_64BIT_MASK(0, 32);
4210 mask
= MAKE_64BIT_MASK(32, 32);
4213 tcg_gen_ori_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], mask
);
4215 tcg_gen_andi_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], ~mask
);
4217 gen_update_fprs_dirty(dc
, rd
);
4218 return advance_pc(dc
);
4221 TRANS(FZEROs
, VIS1
, do_fc
, a
->rd
, 0)
4222 TRANS(FONEs
, VIS1
, do_fc
, a
->rd
, 1)
4224 static bool do_dc(DisasContext
*dc
, int rd
, int64_t c
)
4226 if (gen_trap_ifnofpu(dc
)) {
4230 tcg_gen_movi_i64(cpu_fpr
[rd
/ 2], c
);
4231 gen_update_fprs_dirty(dc
, rd
);
4232 return advance_pc(dc
);
4235 TRANS(FZEROd
, VIS1
, do_dc
, a
->rd
, 0)
4236 TRANS(FONEd
, VIS1
, do_dc
, a
->rd
, -1)
4238 static bool do_ff(DisasContext
*dc
, arg_r_r
*a
,
4239 void (*func
)(TCGv_i32
, TCGv_i32
))
4243 if (gen_trap_ifnofpu(dc
)) {
4247 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4249 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4250 return advance_pc(dc
);
4253 TRANS(FMOVs
, ALL
, do_ff
, a
, gen_op_fmovs
)
4254 TRANS(FNEGs
, ALL
, do_ff
, a
, gen_op_fnegs
)
4255 TRANS(FABSs
, ALL
, do_ff
, a
, gen_op_fabss
)
4256 TRANS(FSRCs
, VIS1
, do_ff
, a
, tcg_gen_mov_i32
)
4257 TRANS(FNOTs
, VIS1
, do_ff
, a
, tcg_gen_not_i32
)
4259 static bool do_fd(DisasContext
*dc
, arg_r_r
*a
,
4260 void (*func
)(TCGv_i32
, TCGv_i64
))
4265 if (gen_trap_ifnofpu(dc
)) {
4269 dst
= tcg_temp_new_i32();
4270 src
= gen_load_fpr_D(dc
, a
->rs
);
4272 gen_store_fpr_F(dc
, a
->rd
, dst
);
4273 return advance_pc(dc
);
4276 TRANS(FPACK16
, VIS1
, do_fd
, a
, gen_op_fpack16
)
4277 TRANS(FPACKFIX
, VIS1
, do_fd
, a
, gen_op_fpackfix
)
4279 static bool do_env_ff(DisasContext
*dc
, arg_r_r
*a
,
4280 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
4284 if (gen_trap_ifnofpu(dc
)) {
4288 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4289 func(tmp
, tcg_env
, tmp
);
4290 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4291 return advance_pc(dc
);
4294 TRANS(FSQRTs
, ALL
, do_env_ff
, a
, gen_helper_fsqrts
)
4295 TRANS(FiTOs
, ALL
, do_env_ff
, a
, gen_helper_fitos
)
4296 TRANS(FsTOi
, ALL
, do_env_ff
, a
, gen_helper_fstoi
)
4298 static bool do_env_fd(DisasContext
*dc
, arg_r_r
*a
,
4299 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
4304 if (gen_trap_ifnofpu(dc
)) {
4308 dst
= tcg_temp_new_i32();
4309 src
= gen_load_fpr_D(dc
, a
->rs
);
4310 func(dst
, tcg_env
, src
);
4311 gen_store_fpr_F(dc
, a
->rd
, dst
);
4312 return advance_pc(dc
);
4315 TRANS(FdTOs
, ALL
, do_env_fd
, a
, gen_helper_fdtos
)
4316 TRANS(FdTOi
, ALL
, do_env_fd
, a
, gen_helper_fdtoi
)
4317 TRANS(FxTOs
, 64, do_env_fd
, a
, gen_helper_fxtos
)
4319 static bool do_dd(DisasContext
*dc
, arg_r_r
*a
,
4320 void (*func
)(TCGv_i64
, TCGv_i64
))
4324 if (gen_trap_ifnofpu(dc
)) {
4328 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4329 src
= gen_load_fpr_D(dc
, a
->rs
);
4331 gen_store_fpr_D(dc
, a
->rd
, dst
);
4332 return advance_pc(dc
);
4335 TRANS(FMOVd
, 64, do_dd
, a
, gen_op_fmovd
)
4336 TRANS(FNEGd
, 64, do_dd
, a
, gen_op_fnegd
)
4337 TRANS(FABSd
, 64, do_dd
, a
, gen_op_fabsd
)
4338 TRANS(FSRCd
, VIS1
, do_dd
, a
, tcg_gen_mov_i64
)
4339 TRANS(FNOTd
, VIS1
, do_dd
, a
, tcg_gen_not_i64
)
4341 static bool do_env_dd(DisasContext
*dc
, arg_r_r
*a
,
4342 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
4346 if (gen_trap_ifnofpu(dc
)) {
4350 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4351 src
= gen_load_fpr_D(dc
, a
->rs
);
4352 func(dst
, tcg_env
, src
);
4353 gen_store_fpr_D(dc
, a
->rd
, dst
);
4354 return advance_pc(dc
);
4357 TRANS(FSQRTd
, ALL
, do_env_dd
, a
, gen_helper_fsqrtd
)
4358 TRANS(FxTOd
, 64, do_env_dd
, a
, gen_helper_fxtod
)
4359 TRANS(FdTOx
, 64, do_env_dd
, a
, gen_helper_fdtox
)
4361 static bool do_env_df(DisasContext
*dc
, arg_r_r
*a
,
4362 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
4367 if (gen_trap_ifnofpu(dc
)) {
4371 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4372 src
= gen_load_fpr_F(dc
, a
->rs
);
4373 func(dst
, tcg_env
, src
);
4374 gen_store_fpr_D(dc
, a
->rd
, dst
);
4375 return advance_pc(dc
);
4378 TRANS(FiTOd
, ALL
, do_env_df
, a
, gen_helper_fitod
)
4379 TRANS(FsTOd
, ALL
, do_env_df
, a
, gen_helper_fstod
)
4380 TRANS(FsTOx
, 64, do_env_df
, a
, gen_helper_fstox
)
4382 static bool do_qq(DisasContext
*dc
, arg_r_r
*a
,
4383 void (*func
)(TCGv_i128
, TCGv_i128
))
4387 if (gen_trap_ifnofpu(dc
)) {
4390 if (gen_trap_float128(dc
)) {
4394 gen_op_clear_ieee_excp_and_FTT();
4395 t
= gen_load_fpr_Q(dc
, a
->rs
);
4397 gen_store_fpr_Q(dc
, a
->rd
, t
);
4398 return advance_pc(dc
);
4401 TRANS(FMOVq
, 64, do_qq
, a
, tcg_gen_mov_i128
)
4402 TRANS(FNEGq
, 64, do_qq
, a
, gen_op_fnegq
)
4403 TRANS(FABSq
, 64, do_qq
, a
, gen_op_fabsq
)
4405 static bool do_env_qq(DisasContext
*dc
, arg_r_r
*a
,
4406 void (*func
)(TCGv_i128
, TCGv_env
, TCGv_i128
))
4410 if (gen_trap_ifnofpu(dc
)) {
4413 if (gen_trap_float128(dc
)) {
4417 t
= gen_load_fpr_Q(dc
, a
->rs
);
4418 func(t
, tcg_env
, t
);
4419 gen_store_fpr_Q(dc
, a
->rd
, t
);
4420 return advance_pc(dc
);
4423 TRANS(FSQRTq
, ALL
, do_env_qq
, a
, gen_helper_fsqrtq
)
4425 static bool do_env_fq(DisasContext
*dc
, arg_r_r
*a
,
4426 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i128
))
4431 if (gen_trap_ifnofpu(dc
)) {
4434 if (gen_trap_float128(dc
)) {
4438 src
= gen_load_fpr_Q(dc
, a
->rs
);
4439 dst
= tcg_temp_new_i32();
4440 func(dst
, tcg_env
, src
);
4441 gen_store_fpr_F(dc
, a
->rd
, dst
);
4442 return advance_pc(dc
);
4445 TRANS(FqTOs
, ALL
, do_env_fq
, a
, gen_helper_fqtos
)
4446 TRANS(FqTOi
, ALL
, do_env_fq
, a
, gen_helper_fqtoi
)
4448 static bool do_env_dq(DisasContext
*dc
, arg_r_r
*a
,
4449 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i128
))
4454 if (gen_trap_ifnofpu(dc
)) {
4457 if (gen_trap_float128(dc
)) {
4461 src
= gen_load_fpr_Q(dc
, a
->rs
);
4462 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4463 func(dst
, tcg_env
, src
);
4464 gen_store_fpr_D(dc
, a
->rd
, dst
);
4465 return advance_pc(dc
);
4468 TRANS(FqTOd
, ALL
, do_env_dq
, a
, gen_helper_fqtod
)
4469 TRANS(FqTOx
, 64, do_env_dq
, a
, gen_helper_fqtox
)
4471 static bool do_env_qf(DisasContext
*dc
, arg_r_r
*a
,
4472 void (*func
)(TCGv_i128
, TCGv_env
, TCGv_i32
))
4477 if (gen_trap_ifnofpu(dc
)) {
4480 if (gen_trap_float128(dc
)) {
4484 src
= gen_load_fpr_F(dc
, a
->rs
);
4485 dst
= tcg_temp_new_i128();
4486 func(dst
, tcg_env
, src
);
4487 gen_store_fpr_Q(dc
, a
->rd
, dst
);
4488 return advance_pc(dc
);
4491 TRANS(FiTOq
, ALL
, do_env_qf
, a
, gen_helper_fitoq
)
4492 TRANS(FsTOq
, ALL
, do_env_qf
, a
, gen_helper_fstoq
)
4494 static bool do_env_qd(DisasContext
*dc
, arg_r_r
*a
,
4495 void (*func
)(TCGv_i128
, TCGv_env
, TCGv_i64
))
4500 if (gen_trap_ifnofpu(dc
)) {
4503 if (gen_trap_float128(dc
)) {
4507 src
= gen_load_fpr_D(dc
, a
->rs
);
4508 dst
= tcg_temp_new_i128();
4509 func(dst
, tcg_env
, src
);
4510 gen_store_fpr_Q(dc
, a
->rd
, dst
);
4511 return advance_pc(dc
);
4514 TRANS(FdTOq
, ALL
, do_env_qd
, a
, gen_helper_fdtoq
)
4515 TRANS(FxTOq
, 64, do_env_qd
, a
, gen_helper_fxtoq
)
4517 static bool do_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4518 void (*func
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
4520 TCGv_i32 src1
, src2
;
4522 if (gen_trap_ifnofpu(dc
)) {
4526 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4527 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4528 func(src1
, src1
, src2
);
4529 gen_store_fpr_F(dc
, a
->rd
, src1
);
4530 return advance_pc(dc
);
4533 TRANS(FPADD16s
, VIS1
, do_fff
, a
, tcg_gen_vec_add16_i32
)
4534 TRANS(FPADD32s
, VIS1
, do_fff
, a
, tcg_gen_add_i32
)
4535 TRANS(FPSUB16s
, VIS1
, do_fff
, a
, tcg_gen_vec_sub16_i32
)
4536 TRANS(FPSUB32s
, VIS1
, do_fff
, a
, tcg_gen_sub_i32
)
4537 TRANS(FNORs
, VIS1
, do_fff
, a
, tcg_gen_nor_i32
)
4538 TRANS(FANDNOTs
, VIS1
, do_fff
, a
, tcg_gen_andc_i32
)
4539 TRANS(FXORs
, VIS1
, do_fff
, a
, tcg_gen_xor_i32
)
4540 TRANS(FNANDs
, VIS1
, do_fff
, a
, tcg_gen_nand_i32
)
4541 TRANS(FANDs
, VIS1
, do_fff
, a
, tcg_gen_and_i32
)
4542 TRANS(FXNORs
, VIS1
, do_fff
, a
, tcg_gen_eqv_i32
)
4543 TRANS(FORNOTs
, VIS1
, do_fff
, a
, tcg_gen_orc_i32
)
4544 TRANS(FORs
, VIS1
, do_fff
, a
, tcg_gen_or_i32
)
4546 static bool do_env_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4547 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
4549 TCGv_i32 src1
, src2
;
4551 if (gen_trap_ifnofpu(dc
)) {
4555 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4556 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4557 func(src1
, tcg_env
, src1
, src2
);
4558 gen_store_fpr_F(dc
, a
->rd
, src1
);
4559 return advance_pc(dc
);
4562 TRANS(FADDs
, ALL
, do_env_fff
, a
, gen_helper_fadds
)
4563 TRANS(FSUBs
, ALL
, do_env_fff
, a
, gen_helper_fsubs
)
4564 TRANS(FMULs
, ALL
, do_env_fff
, a
, gen_helper_fmuls
)
4565 TRANS(FDIVs
, ALL
, do_env_fff
, a
, gen_helper_fdivs
)
4567 static bool do_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4568 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
4570 TCGv_i64 dst
, src1
, src2
;
4572 if (gen_trap_ifnofpu(dc
)) {
4576 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4577 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4578 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4579 func(dst
, src1
, src2
);
4580 gen_store_fpr_D(dc
, a
->rd
, dst
);
4581 return advance_pc(dc
);
4584 TRANS(FMUL8x16
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16
)
4585 TRANS(FMUL8x16AU
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16au
)
4586 TRANS(FMUL8x16AL
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16al
)
4587 TRANS(FMUL8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8sux16
)
4588 TRANS(FMUL8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8ulx16
)
4589 TRANS(FMULD8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8sux16
)
4590 TRANS(FMULD8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8ulx16
)
4591 TRANS(FPMERGE
, VIS1
, do_ddd
, a
, gen_helper_fpmerge
)
4592 TRANS(FEXPAND
, VIS1
, do_ddd
, a
, gen_helper_fexpand
)
4594 TRANS(FPADD16
, VIS1
, do_ddd
, a
, tcg_gen_vec_add16_i64
)
4595 TRANS(FPADD32
, VIS1
, do_ddd
, a
, tcg_gen_vec_add32_i64
)
4596 TRANS(FPSUB16
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub16_i64
)
4597 TRANS(FPSUB32
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub32_i64
)
4598 TRANS(FNORd
, VIS1
, do_ddd
, a
, tcg_gen_nor_i64
)
4599 TRANS(FANDNOTd
, VIS1
, do_ddd
, a
, tcg_gen_andc_i64
)
4600 TRANS(FXORd
, VIS1
, do_ddd
, a
, tcg_gen_xor_i64
)
4601 TRANS(FNANDd
, VIS1
, do_ddd
, a
, tcg_gen_nand_i64
)
4602 TRANS(FANDd
, VIS1
, do_ddd
, a
, tcg_gen_and_i64
)
4603 TRANS(FXNORd
, VIS1
, do_ddd
, a
, tcg_gen_eqv_i64
)
4604 TRANS(FORNOTd
, VIS1
, do_ddd
, a
, tcg_gen_orc_i64
)
4605 TRANS(FORd
, VIS1
, do_ddd
, a
, tcg_gen_or_i64
)
4607 TRANS(FPACK32
, VIS1
, do_ddd
, a
, gen_op_fpack32
)
4608 TRANS(FALIGNDATAg
, VIS1
, do_ddd
, a
, gen_op_faligndata
)
4609 TRANS(BSHUFFLE
, VIS2
, do_ddd
, a
, gen_op_bshuffle
)
4611 static bool do_rdd(DisasContext
*dc
, arg_r_r_r
*a
,
4612 void (*func
)(TCGv
, TCGv_i64
, TCGv_i64
))
4614 TCGv_i64 src1
, src2
;
4617 if (gen_trap_ifnofpu(dc
)) {
4621 dst
= gen_dest_gpr(dc
, a
->rd
);
4622 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4623 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4624 func(dst
, src1
, src2
);
4625 gen_store_gpr(dc
, a
->rd
, dst
);
4626 return advance_pc(dc
);
4629 TRANS(FPCMPLE16
, VIS1
, do_rdd
, a
, gen_helper_fcmple16
)
4630 TRANS(FPCMPNE16
, VIS1
, do_rdd
, a
, gen_helper_fcmpne16
)
4631 TRANS(FPCMPGT16
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt16
)
4632 TRANS(FPCMPEQ16
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq16
)
4634 TRANS(FPCMPLE32
, VIS1
, do_rdd
, a
, gen_helper_fcmple32
)
4635 TRANS(FPCMPNE32
, VIS1
, do_rdd
, a
, gen_helper_fcmpne32
)
4636 TRANS(FPCMPGT32
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt32
)
4637 TRANS(FPCMPEQ32
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq32
)
4639 static bool do_env_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4640 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
4642 TCGv_i64 dst
, src1
, src2
;
4644 if (gen_trap_ifnofpu(dc
)) {
4648 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4649 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4650 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4651 func(dst
, tcg_env
, src1
, src2
);
4652 gen_store_fpr_D(dc
, a
->rd
, dst
);
4653 return advance_pc(dc
);
4656 TRANS(FADDd
, ALL
, do_env_ddd
, a
, gen_helper_faddd
)
4657 TRANS(FSUBd
, ALL
, do_env_ddd
, a
, gen_helper_fsubd
)
4658 TRANS(FMULd
, ALL
, do_env_ddd
, a
, gen_helper_fmuld
)
4659 TRANS(FDIVd
, ALL
, do_env_ddd
, a
, gen_helper_fdivd
)
4661 static bool trans_FsMULd(DisasContext
*dc
, arg_r_r_r
*a
)
4664 TCGv_i32 src1
, src2
;
4666 if (gen_trap_ifnofpu(dc
)) {
4669 if (!(dc
->def
->features
& CPU_FEATURE_FSMULD
)) {
4670 return raise_unimpfpop(dc
);
4673 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4674 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4675 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4676 gen_helper_fsmuld(dst
, tcg_env
, src1
, src2
);
4677 gen_store_fpr_D(dc
, a
->rd
, dst
);
4678 return advance_pc(dc
);
4681 static bool do_dddd(DisasContext
*dc
, arg_r_r_r
*a
,
4682 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
4684 TCGv_i64 dst
, src0
, src1
, src2
;
4686 if (gen_trap_ifnofpu(dc
)) {
4690 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4691 src0
= gen_load_fpr_D(dc
, a
->rd
);
4692 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4693 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4694 func(dst
, src0
, src1
, src2
);
4695 gen_store_fpr_D(dc
, a
->rd
, dst
);
4696 return advance_pc(dc
);
4699 TRANS(PDIST
, VIS1
, do_dddd
, a
, gen_helper_pdist
)
4701 static bool do_env_qqq(DisasContext
*dc
, arg_r_r_r
*a
,
4702 void (*func
)(TCGv_i128
, TCGv_env
, TCGv_i128
, TCGv_i128
))
4704 TCGv_i128 src1
, src2
;
4706 if (gen_trap_ifnofpu(dc
)) {
4709 if (gen_trap_float128(dc
)) {
4713 src1
= gen_load_fpr_Q(dc
, a
->rs1
);
4714 src2
= gen_load_fpr_Q(dc
, a
->rs2
);
4715 func(src1
, tcg_env
, src1
, src2
);
4716 gen_store_fpr_Q(dc
, a
->rd
, src1
);
4717 return advance_pc(dc
);
4720 TRANS(FADDq
, ALL
, do_env_qqq
, a
, gen_helper_faddq
)
4721 TRANS(FSUBq
, ALL
, do_env_qqq
, a
, gen_helper_fsubq
)
4722 TRANS(FMULq
, ALL
, do_env_qqq
, a
, gen_helper_fmulq
)
4723 TRANS(FDIVq
, ALL
, do_env_qqq
, a
, gen_helper_fdivq
)
4725 static bool trans_FdMULq(DisasContext
*dc
, arg_r_r_r
*a
)
4727 TCGv_i64 src1
, src2
;
4730 if (gen_trap_ifnofpu(dc
)) {
4733 if (gen_trap_float128(dc
)) {
4737 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4738 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4739 dst
= tcg_temp_new_i128();
4740 gen_helper_fdmulq(dst
, tcg_env
, src1
, src2
);
4741 gen_store_fpr_Q(dc
, a
->rd
, dst
);
4742 return advance_pc(dc
);
4745 static bool do_fmovr(DisasContext
*dc
, arg_FMOVRs
*a
, bool is_128
,
4746 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
4750 if (!gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
))) {
4753 if (gen_trap_ifnofpu(dc
)) {
4756 if (is_128
&& gen_trap_float128(dc
)) {
4760 gen_op_clear_ieee_excp_and_FTT();
4761 func(dc
, &cmp
, a
->rd
, a
->rs2
);
4762 return advance_pc(dc
);
4765 TRANS(FMOVRs
, 64, do_fmovr
, a
, false, gen_fmovs
)
4766 TRANS(FMOVRd
, 64, do_fmovr
, a
, false, gen_fmovd
)
4767 TRANS(FMOVRq
, 64, do_fmovr
, a
, true, gen_fmovq
)
4769 static bool do_fmovcc(DisasContext
*dc
, arg_FMOVscc
*a
, bool is_128
,
4770 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
4774 if (gen_trap_ifnofpu(dc
)) {
4777 if (is_128
&& gen_trap_float128(dc
)) {
4781 gen_op_clear_ieee_excp_and_FTT();
4782 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4783 func(dc
, &cmp
, a
->rd
, a
->rs2
);
4784 return advance_pc(dc
);
4787 TRANS(FMOVscc
, 64, do_fmovcc
, a
, false, gen_fmovs
)
4788 TRANS(FMOVdcc
, 64, do_fmovcc
, a
, false, gen_fmovd
)
4789 TRANS(FMOVqcc
, 64, do_fmovcc
, a
, true, gen_fmovq
)
4791 static bool do_fmovfcc(DisasContext
*dc
, arg_FMOVsfcc
*a
, bool is_128
,
4792 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
4796 if (gen_trap_ifnofpu(dc
)) {
4799 if (is_128
&& gen_trap_float128(dc
)) {
4803 gen_op_clear_ieee_excp_and_FTT();
4804 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4805 func(dc
, &cmp
, a
->rd
, a
->rs2
);
4806 return advance_pc(dc
);
4809 TRANS(FMOVsfcc
, 64, do_fmovfcc
, a
, false, gen_fmovs
)
4810 TRANS(FMOVdfcc
, 64, do_fmovfcc
, a
, false, gen_fmovd
)
4811 TRANS(FMOVqfcc
, 64, do_fmovfcc
, a
, true, gen_fmovq
)
4813 static bool do_fcmps(DisasContext
*dc
, arg_FCMPs
*a
, bool e
)
4815 TCGv_i32 src1
, src2
;
4817 if (avail_32(dc
) && a
->cc
!= 0) {
4820 if (gen_trap_ifnofpu(dc
)) {
4824 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4825 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4827 gen_helper_fcmpes(cpu_fcc
[a
->cc
], tcg_env
, src1
, src2
);
4829 gen_helper_fcmps(cpu_fcc
[a
->cc
], tcg_env
, src1
, src2
);
4831 return advance_pc(dc
);
4834 TRANS(FCMPs
, ALL
, do_fcmps
, a
, false)
4835 TRANS(FCMPEs
, ALL
, do_fcmps
, a
, true)
4837 static bool do_fcmpd(DisasContext
*dc
, arg_FCMPd
*a
, bool e
)
4839 TCGv_i64 src1
, src2
;
4841 if (avail_32(dc
) && a
->cc
!= 0) {
4844 if (gen_trap_ifnofpu(dc
)) {
4848 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4849 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4851 gen_helper_fcmped(cpu_fcc
[a
->cc
], tcg_env
, src1
, src2
);
4853 gen_helper_fcmpd(cpu_fcc
[a
->cc
], tcg_env
, src1
, src2
);
4855 return advance_pc(dc
);
4858 TRANS(FCMPd
, ALL
, do_fcmpd
, a
, false)
4859 TRANS(FCMPEd
, ALL
, do_fcmpd
, a
, true)
4861 static bool do_fcmpq(DisasContext
*dc
, arg_FCMPq
*a
, bool e
)
4863 TCGv_i128 src1
, src2
;
4865 if (avail_32(dc
) && a
->cc
!= 0) {
4868 if (gen_trap_ifnofpu(dc
)) {
4871 if (gen_trap_float128(dc
)) {
4875 src1
= gen_load_fpr_Q(dc
, a
->rs1
);
4876 src2
= gen_load_fpr_Q(dc
, a
->rs2
);
4878 gen_helper_fcmpeq(cpu_fcc
[a
->cc
], tcg_env
, src1
, src2
);
4880 gen_helper_fcmpq(cpu_fcc
[a
->cc
], tcg_env
, src1
, src2
);
4882 return advance_pc(dc
);
4885 TRANS(FCMPq
, ALL
, do_fcmpq
, a
, false)
4886 TRANS(FCMPEq
, ALL
, do_fcmpq
, a
, true)
4888 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
4890 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
4893 dc
->pc
= dc
->base
.pc_first
;
4894 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
4895 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
4896 dc
->def
= &cpu_env(cs
)->def
;
4897 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
4898 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
4899 #ifndef CONFIG_USER_ONLY
4900 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
4902 #ifdef TARGET_SPARC64
4904 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
4905 #ifndef CONFIG_USER_ONLY
4906 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
4910 * if we reach a page boundary, we stop generation so that the
4911 * PC of a TT_TFAULT exception is always in the right page
4913 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
4914 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
4917 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
4921 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4923 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
4924 target_ulong npc
= dc
->npc
;
4929 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
4930 npc
= dc
->jump_pc
[0] | JUMP_PC
;
4933 case DYNAMIC_PC_LOOKUP
:
4937 g_assert_not_reached();
4940 tcg_gen_insn_start(dc
->pc
, npc
);
4943 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
4945 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
4948 insn
= translator_ldl(cpu_env(cs
), &dc
->base
, dc
->pc
);
4949 dc
->base
.pc_next
+= 4;
4951 if (!decode(dc
, insn
)) {
4952 gen_exception(dc
, TT_ILL_INSN
);
4955 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
4958 if (dc
->pc
!= dc
->base
.pc_next
) {
4959 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
4963 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
4965 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
4966 DisasDelayException
*e
, *e_next
;
4971 switch (dc
->base
.is_jmp
) {
4973 case DISAS_TOO_MANY
:
4974 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
4975 /* static PC and NPC: we can use direct chaining */
4976 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
4983 case DYNAMIC_PC_LOOKUP
:
4989 g_assert_not_reached();
4992 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
4998 gen_generic_branch(dc
);
5003 case DYNAMIC_PC_LOOKUP
:
5006 g_assert_not_reached();
5009 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
5012 tcg_gen_lookup_and_goto_ptr();
5014 tcg_gen_exit_tb(NULL
, 0);
5018 case DISAS_NORETURN
:
5024 tcg_gen_exit_tb(NULL
, 0);
5028 g_assert_not_reached();
5031 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5032 gen_set_label(e
->lab
);
5034 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
5035 if (e
->npc
% 4 == 0) {
5036 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5038 gen_helper_raise_exception(tcg_env
, e
->excp
);
5045 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5046 CPUState
*cpu
, FILE *logfile
)
5048 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5049 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
5052 static const TranslatorOps sparc_tr_ops
= {
5053 .init_disas_context
= sparc_tr_init_disas_context
,
5054 .tb_start
= sparc_tr_tb_start
,
5055 .insn_start
= sparc_tr_insn_start
,
5056 .translate_insn
= sparc_tr_translate_insn
,
5057 .tb_stop
= sparc_tr_tb_stop
,
5058 .disas_log
= sparc_tr_disas_log
,
5061 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5062 vaddr pc
, void *host_pc
)
5064 DisasContext dc
= {};
5066 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
5069 void sparc_tcg_init(void)
5071 static const char gregnames
[32][4] = {
5072 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5073 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5074 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5075 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5077 static const char fregnames
[32][4] = {
5078 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5079 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5080 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5081 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5084 static const struct { TCGv_i32
*ptr
; int off
; const char *name
; } r32
[] = {
5085 #ifdef TARGET_SPARC64
5086 { &cpu_fprs
, offsetof(CPUSPARCState
, fprs
), "fprs" },
5087 { &cpu_fcc
[0], offsetof(CPUSPARCState
, fcc
[0]), "fcc0" },
5088 { &cpu_fcc
[1], offsetof(CPUSPARCState
, fcc
[1]), "fcc1" },
5089 { &cpu_fcc
[2], offsetof(CPUSPARCState
, fcc
[2]), "fcc2" },
5090 { &cpu_fcc
[3], offsetof(CPUSPARCState
, fcc
[3]), "fcc3" },
5092 { &cpu_fcc
[0], offsetof(CPUSPARCState
, fcc
[0]), "fcc" },
5096 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5097 #ifdef TARGET_SPARC64
5098 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5099 { &cpu_xcc_Z
, offsetof(CPUSPARCState
, xcc_Z
), "xcc_Z" },
5100 { &cpu_xcc_C
, offsetof(CPUSPARCState
, xcc_C
), "xcc_C" },
5102 { &cpu_cc_N
, offsetof(CPUSPARCState
, cc_N
), "cc_N" },
5103 { &cpu_cc_V
, offsetof(CPUSPARCState
, cc_V
), "cc_V" },
5104 { &cpu_icc_Z
, offsetof(CPUSPARCState
, icc_Z
), "icc_Z" },
5105 { &cpu_icc_C
, offsetof(CPUSPARCState
, icc_C
), "icc_C" },
5106 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5107 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5108 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5109 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5110 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5115 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5116 offsetof(CPUSPARCState
, regwptr
),
5119 for (i
= 0; i
< ARRAY_SIZE(r32
); ++i
) {
5120 *r32
[i
].ptr
= tcg_global_mem_new_i32(tcg_env
, r32
[i
].off
, r32
[i
].name
);
5123 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5124 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
5128 for (i
= 1; i
< 8; ++i
) {
5129 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5130 offsetof(CPUSPARCState
, gregs
[i
]),
5134 for (i
= 8; i
< 32; ++i
) {
5135 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5136 (i
- 8) * sizeof(target_ulong
),
5140 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5141 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
5142 offsetof(CPUSPARCState
, fpr
[i
]),
5147 void sparc_restore_state_to_opc(CPUState
*cs
,
5148 const TranslationBlock
*tb
,
5149 const uint64_t *data
)
5151 CPUSPARCState
*env
= cpu_env(cs
);
5152 target_ulong pc
= data
[0];
5153 target_ulong npc
= data
[1];
5156 if (npc
== DYNAMIC_PC
) {
5157 /* dynamic NPC: already stored */
5158 } else if (npc
& JUMP_PC
) {
5159 /* jump PC: use 'cond' and the jump targets of the translation */
5161 env
->npc
= npc
& ~3;