target/sparc: Prefer fast cpu_env() over slower CPU QOM cast macro
[qemu/kevin.git] target/sparc/translate.c
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist           ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr  ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    dst = DFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
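
/*
 * Return icc.C as a 0/1 value.  On a 64-bit target the 32-bit carry is
 * kept in bit 32 of cpu_icc_C (see gen_op_addcc_int below), so it must
 * be extracted into a fresh temporary first.
 */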
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
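
/*
 * MULScc: one step of the V8 iterative multiply.  The N ^ V condition
 * is shifted into src1 from the left, the low bit of %y selects whether
 * src2 or zero is added, and %y itself shifts right by one bit.
 */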
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
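
/*
 * The udiv/sdiv helpers return a 64-bit value with the 32-bit quotient
 * in the low half and the overflow indication in the high half; the
 * *divcc forms below unpack that directly into N/V and derive Z and C.
 */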
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif

    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif

    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
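
/*
 * FALIGNDATA forms an 8-byte result from two doubles, taking the bytes
 * starting at the offset held in GSR.align (the low three bits of %gsr).
 */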
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
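
/*
 * Delayed exceptions are emitted out of line, after the end of the TB
 * proper; delay_exceptionv records the pc/npc to restore and returns
 * the label that a conditional branch should target to raise EXCP.
 */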
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}
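
/*
 * The fmov/fneg/fabs wrappers below first clear FSR.cexc and FSR.ftt,
 * since these operations complete like any other non-trapping FPop.
 */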
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY:     /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL:     /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to pass through
         * the permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:            /* Bypass */
        case ASI_REAL_IO:         /* Bypass, non-cacheable */
        case ASI_REAL_L:          /* Bypass LE */
        case ASI_REAL_IO_L:       /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:      /* Real address, twinx */
        case ASI_TWINX_REAL_L:    /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:   /* Nucleus */
        case ASI_NL:  /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:   /* As if user primary */
        case ASI_AIUPL:  /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:   /* As if user secondary */
        case ASI_AIUSL:  /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:   /* Secondary */
        case ASI_SL:  /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:   /* Primary */
        case ASI_PL:  /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }

        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }

        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

1517 switch (da->type) {
1518 case GET_ASI_EXCP:
1519 break;
1520 case GET_ASI_DIRECT:
1521 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1522 da->mem_idx, MO_UB);
1523 break;
1524 default:
1525 /* ??? In theory, this should be raise DAE_invalid_asi.
1526 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1527 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1528 gen_helper_exit_atomic(tcg_env);
1529 } else {
1530 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1531 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1532 TCGv_i64 s64, t64;
1534 save_state(dc);
1535 t64 = tcg_temp_new_i64();
1536 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1538 s64 = tcg_constant_i64(0xff);
1539 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1541 tcg_gen_trunc_i64_tl(dst, t64);
1543 /* End the TB. */
1544 dc->npc = DYNAMIC_PC;
1546 break;
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them. */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}

static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
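
/* LDDA: load a 64-bit doubleword into the even/odd GPR pair rd:rd+1. */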
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}

static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_BCOPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}

static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}

#ifdef TARGET_SPARC64
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif

static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}

static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
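
/*
 * The avail_* predicates below gate each decodetree pattern on the
 * features of the configured CPU; TRANS wires a pattern to its
 * implementation via the trans_<NAME> hook that decode-insns.c.inc expects.
 */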
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif

/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}

/*
 * Major opcodes 00 and 01 -- branches, call, and sethi
 */
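
/*
 * SPARC branch semantics: with the annul bit clear, the delay slot is
 * always executed; with it set, a conditional branch annuls the slot
 * when untaken, while a branch-always annuls it unconditionally.
 */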
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}

static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}

static bool gen_trap_float128(DisasContext *dc)
{
    if (dc->def->features & CPU_FEATURE_FLOAT128) {
        return false;
    }
    return raise_unimpfpop(dc);
}


static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)

static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)

static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop. */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}

/*
 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
 */

static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never. */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case. Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always. */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap. */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
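
/*
 * In both forms above the software trap number ends up as
 * TT_TRAP + ((rs1 + rs2_or_imm) & mask): the sum wraps within the V8
 * trap mask, or within the wider UA2005 hypervisor mask when HYPV is
 * present and we are in supervisor mode, mirroring the masking the
 * hardware applies to Tcc operands.
 */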

static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}

static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc., end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}

static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}

static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8. In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep.
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)

static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    if (avail_64(dc)) {
        gen_helper_flushw(tcg_env);
        return advance_pc(dc);
    }
    return false;
}

static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
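
/*
 * The XOR above is not an optimization quirk: the V8/V9 write-state
 * instructions (WRY/WRASR, WRPSR, WRPR, ...) are defined to write
 * r[rs1] xor (r[rs2] or the sign-extended immediate); the special
 * cases merely fold the result when one operand is known to be zero.
 */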

static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)

static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)

static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)

static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
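
/*
 * Note the logic_cc path above: the raw result is computed straight
 * into cpu_cc_N and then copied to cpu_cc_Z (and the icc variants for
 * 64-bit), while C and V are cleared. Both N and Z are thus derived
 * from the result itself, so the cc-setting logical ops need no
 * separate comparison.
 */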

static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)

static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
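
/*
 * UDIV divides the 64-bit quantity (Y:rs1) by a 32-bit divisor:
 * tcg_gen_concat_tl_i64 places rs1 in the low half and Y in the high
 * half, and the umin against UINT32_MAX implements the V8 rule that
 * an overflowed unsigned quotient saturates to 0xffffffff.
 */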

static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool left)
{
    TCGv dst, s1, s2, lo1, lo2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    /*
     * Theory of operation: there are two tables, left and right (not to
     * be confused with the left and right versions of the opcode). These
     * are indexed by the low 3 bits of the inputs. To make things "easy",
     * these tables are loaded into two constants, TABL and TABR below.
     * The operation index = (input & imask) << shift calculates the index
     * into the constant, while val = (table >> index) & omask calculates
     * the value we're looking for.
     */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
    tcg_gen_andi_tl(lo1, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = address_mask_i(dc, -8);
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
    tcg_gen_and_tl(lo2, lo2, lo1);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
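
/*
 * Worked example for gen_edge with width 8: imask = 7 and shift = 3,
 * so index = (input & 7) * 8, and the "& 0xff" selects exactly one
 * byte of the 64-bit table constant -- each table packs eight 8-bit
 * edge masks, one per possible byte offset within an aligned
 * doubleword.
 */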

static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)

static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)

static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
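
/*
 * In the 64-bit-cpu, 32-bit-shift branch of do_shift_i above,
 * deposit_z/extract/sextract fold the shift and the 32-bit zero- or
 * sign-extension into a single TCG op: e.g. SRL by i becomes "extract
 * bits [i, 32) of src1", which shifts and discards the high half in
 * one step.
 */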

static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}

static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
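
/*
 * Note that do_mov_cond loads the *old* value of rd and uses it as
 * the movcond else-operand: MOVcc/MOVR are conditional writes, so
 * when the condition is false the destination register must keep its
 * prior value.
 */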

static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}

static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)

/*
 * Major opcode 11 -- load and store instructions
 */

static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
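
/*
 * The AM_CHECK branch implements the 32-bit address-mask mode (V9
 * PSTATE.AM): when active, effective addresses are zero-extended to
 * 32 bits, and the masked address is built in a temporary so the
 * source register is never clobbered.
 */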

static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)

static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)

static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
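
/*
 * Operand naming in do_casa follows the CAS semantics: c (from rs2)
 * is the comparand, n (the old contents of rd) is the value stored on
 * a match, and o receives the prior memory contents, which always
 * land back in rd.
 */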

static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)

static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}

static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
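
/*
 * The fcc fields live unpacked in cpu_fcc[] rather than inside a
 * stored fsr image, which is why fcc0 is extracted by hand here (and
 * all four fields in LDXFSR below) while the helper only updates the
 * non-fcc, non-ftt portions of the FSR.
 */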

static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    return advance_pc(dc);
#else
    return false;
#endif
}

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

static bool do_fc(DisasContext *dc, int rd, bool c)
{
    uint64_t mask;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    if (rd & 1) {
        mask = MAKE_64BIT_MASK(0, 32);
    } else {
        mask = MAKE_64BIT_MASK(32, 32);
    }
    if (c) {
        tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
    } else {
        tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
    }
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)
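
/*
 * The masking in do_fc reflects the float register file layout: the
 * single-precision registers are packed in pairs into the 64-bit
 * cpu_fpr[] slots, with an even-numbered single in the high half of
 * its slot and the following odd one in the low half.
 */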

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)

static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4338 static bool do_qq(DisasContext *dc, arg_r_r *a,
4339 void (*func)(TCGv_i128, TCGv_i128))
4341 TCGv_i128 t;
4343 if (gen_trap_ifnofpu(dc)) {
4344 return true;
4346 if (gen_trap_float128(dc)) {
4347 return true;
4350 gen_op_clear_ieee_excp_and_FTT();
4351 t = gen_load_fpr_Q(dc, a->rs);
4352 func(t, t);
4353 gen_store_fpr_Q(dc, a->rd, t);
4354 return advance_pc(dc);
4357 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4358 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4359 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4361 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4362 void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4364 TCGv_i128 t;
4366 if (gen_trap_ifnofpu(dc)) {
4367 return true;
4369 if (gen_trap_float128(dc)) {
4370 return true;
4373 t = gen_load_fpr_Q(dc, a->rs);
4374 func(t, tcg_env, t);
4375 gen_store_fpr_Q(dc, a->rd, t);
4376 return advance_pc(dc);
4379 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4381 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4382 void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4384 TCGv_i128 src;
4385 TCGv_i32 dst;
4387 if (gen_trap_ifnofpu(dc)) {
4388 return true;
4390 if (gen_trap_float128(dc)) {
4391 return true;
4394 src = gen_load_fpr_Q(dc, a->rs);
4395 dst = tcg_temp_new_i32();
4396 func(dst, tcg_env, src);
4397 gen_store_fpr_F(dc, a->rd, dst);
4398 return advance_pc(dc);
4401 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4402 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

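/*
 * Note that the three-operand forms below reuse src1 as the destination;
 * the loaded value is a temporary rather than a live global, so
 * overwriting it is safe.
 */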
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)

static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

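/*
 * The VIS partitioned compares deposit a bit mask of per-element results
 * in an integer register rather than a float register, hence the "r"
 * destination.
 */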
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)

static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)

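/*
 * FsMULd is not implemented by every CPU model, so it is gated on
 * CPU_FEATURE_FSMULD and raises an unimplemented-FPop trap when absent.
 */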
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

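/*
 * do_dddd loads rd as an additional source: PDIST accumulates its sum of
 * absolute byte differences into the destination register.
 */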
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)

static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

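/*
 * Three flavours of conditional float move follow: FMOVR tests an integer
 * register against zero, FMOVcc tests the integer condition codes, and
 * FMOVfcc tests one of the float condition-code fields.
 */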
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

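/*
 * FCMP writes the selected %fcc field; the FCMPE variants additionally
 * signal an exception when an operand is a quiet NaN.  SPARCv8 only has
 * %fcc0, so a non-zero cc field is an illegal instruction there.
 */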
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

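/*
 * Everything below hooks the per-instruction translators above into the
 * generic translator loop.
 */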
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

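/*
 * Real instruction addresses are always 4-byte aligned, so the low two
 * bits of pc/npc are free to encode the special values JUMP_PC,
 * DYNAMIC_PC and DYNAMIC_PC_LOOKUP.
 */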
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

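/*
 * Note that fregnames below lists only the even-numbered float registers:
 * the FP state is modelled as TARGET_DPREGS 64-bit values, each backing
 * an even/odd pair of 32-bit registers.
 */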
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}

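/*
 * Called when unwinding after an exception: rebuild the architectural
 * pc/npc pair from the values recorded by sparc_tr_insn_start.
 */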
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}