target/sparc: Perform DFPREG/QFPREG in decodetree
target/sparc/translate.c
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist           ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr  ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))
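
/*
 * Worked example of the two field orders above: GET_FIELD(insn, 0, 1)
 * extracts the two most-significant bits of the instruction word (the
 * major opcode), since bit 0 counts from 2^31 here.  By contrast,
 * GET_FIELD_SP(insn, 25, 29) extracts insn<29:25> (the rd field),
 * counting bit 0 as 2^0 the way the architecture manuals do.
 */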
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[dst / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
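
/*
 * Note on the layout used above: single-precision registers are packed
 * two per 64-bit element of cpu_fpr[], with the even-numbered register
 * in the high 32 bits and the odd-numbered one in the low 32 bits.
 * That is why gen_load_fpr_F uses extrh for even sources and extrl for
 * odd ones, and why gen_store_fpr_F deposits at offset 32 for even
 * destinations.  A quad value spans two consecutive doubles, the
 * lower-numbered double holding the most-significant half.
 */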
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
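
/*
 * On a 64-bit target, gen_op_addcc_int below leaves
 * result ^ src1 ^ src2 in cpu_icc_C, whose bit 32 is the carry out of
 * bit 31; hence the single-bit extract above.  On a 32-bit target the
 * carry is already a 0/1 value in cpu_icc_C itself.
 */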
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
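
/*
 * The overflow computation above uses the identity
 *   V = (result ^ src2) & ~(src1 ^ src2)
 * whose most significant bit is set exactly when both inputs have the
 * same sign and the result's sign differs, i.e. on signed overflow.
 * The src1 ^ src2 term is first staged in cpu_cc_Z and only afterwards
 * replaced with the result, which is what the Z condition tests.
 */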
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
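
/*
 * With both high-part inputs zero, tcg_gen_sub2_tl leaves 0 - borrow
 * (0 or -1) in the high output; the negation normalizes that into a
 * 0/1 carry flag.  Overflow for subtraction is
 *   V = (result ^ src1) & (src1 ^ src2)
 * i.e. the operands had different signs and the result's sign differs
 * from src1's.
 */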
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
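
/*
 * MULScc above is one step of the V8 iterative multiply: bit 0 of Y
 * selects whether rs2 is accumulated this step, Y shifts right one bit
 * taking the low bit of rs1, the partial sum in rs1 shifts right one
 * bit taking N ^ V from the previous step, and the final add updates
 * the condition codes for the next iteration.
 */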
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif

    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif

    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
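
/*
 * The masking trick above relies on the identity
 *   ((x >> 8) * y) << 8 == (x & ~0xff) * y
 * since ((x >> 8) << 8) == (x & ~0xff): clearing the low byte of the
 * sign-extended halfword folds the two shifts of the architectural
 * description into a single and-immediate.
 */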
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}
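
/*
 * For the quad operations above, tcg_gen_extr_i128_i64 places the low
 * 64 bits in its first output and the high 64 bits in the second; the
 * IEEE sign bit of a 128-bit value is the top bit of the high word, so
 * only that word needs the xor (negate) or and (abs).
 */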
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;
/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
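
/*
 * Example resolution: on a pre-v9 CPU in supervisor mode,
 * "lda [%o0] ASI_USERDATA, %o1" yields GET_ASI_DIRECT with
 * mem_idx = MMU_USER_IDX, so the load is emitted inline but with user
 * permissions.  An ASI the switches above do not recognize leaves type
 * as GET_ASI_HELPER and goes through the out-of-line helper instead.
 */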
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_BCOPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);

    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[rd / 2], cmp->c1, c2,
                        cpu_fpr[rs / 2], cpu_fpr[rd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[rd / 2 + 1], cmp->c1, c2,
                        cpu_fpr[rs / 2 + 1], cpu_fpr[rd / 2 + 1]);

    gen_update_fprs_dirty(dc, rd);
#else
    qemu_build_not_reached();
#endif
}
#ifdef TARGET_SPARC64
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
static int extract_dfpreg(DisasContext *dc, int x)
{
    int r = x & 0x1e;
#ifdef TARGET_SPARC64
    r |= (x & 1) << 5;
#endif
    return r;
}

static int extract_qfpreg(DisasContext *dc, int x)
{
    int r = x & 0x1c;
#ifdef TARGET_SPARC64
    r |= (x & 1) << 5;
#endif
    return r;
}
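
/*
 * Worked example: SPARC64 encodes the double-precision registers
 * %f0..%f62 in a 5-bit field by reusing bit 0 of the field as bit 5 of
 * the register number.  A field value of 0b00011 therefore maps to
 * (3 & 0x1e) | ((3 & 1) << 5) = 2 | 32 = %f34.  On 32-bit CPUs only
 * %f0..%f30 exist, so the low bit is simply masked off; quad registers
 * additionally require 4-register alignment, hence the 0x1c mask.
 */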
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
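
/*
 * For example, TRANS(Bicc, ALL, do_bpcc, a) later in this file
 * expands to
 *
 *   static bool trans_Bicc(DisasContext *dc, arg_Bicc *a)
 *   { return avail_ALL(dc) && do_bpcc(dc, a); }
 *
 * so each decodetree pattern is gated on the feature test named by its
 * AVAIL argument before the shared implementation runs.
 */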
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
/*
 * Major opcodes 00 and 01 -- branches, call, and sethi
 */

static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}

static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}

static bool gen_trap_float128(DisasContext *dc)
{
    if (dc->def->features & CPU_FEATURE_FLOAT128) {
        return false;
    }
    return raise_unimpfpop(dc);
}

static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)

static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)

static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop. */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}

/*
 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
 */

static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never. */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always. */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap. */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}

static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}

static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}

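/*
 * Common worker for the RDasr/RDPR/RDHPR family: raise a privilege
 * trap when required, then let FUNC produce the value -- either into
 * the passed temporary or by returning another TCGv -- and store it
 * into %rd.
 */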
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}

static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASRs, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * The HYPV check may not be sufficient: UA2005 & UA2007 describe
 * this ASR as implementation dependent.
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)

static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    if (avail_64(dc)) {
        gen_helper_flushw(tcg_env);
        return advance_pc(dc);
    }
    return false;
}

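/*
 * Common worker for the WRasr/WRPR/WRHPR family.  Per the architecture,
 * the value written is r[rs1] XOR (r[rs2] or sign_ext(simm13)); the
 * common rs1 == %g0 case reduces to a move of the constant or register.
 */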
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}

static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)

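/*
 * SAVED and RESTORED update the window bookkeeping (cansave,
 * canrestore, otherwin) on behalf of spill and fill trap handlers;
 * the actual arithmetic lives in the helpers.
 */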
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)

static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)

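/*
 * Common worker for two-operand arithmetic and logical insns.
 * With LOGIC_CC the result is computed directly into cpu_cc_N, then
 * the remaining flags are fixed up by hand: Z mirrors the result and
 * C and V are zero, which is all the cc state a logical op produces.
 */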
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)

static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

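/*
 * UDIV: the dividend is the 64-bit concatenation Y:rs1 and the divisor
 * the low 32 bits of rs2.  A quotient that does not fit in 32 bits is
 * the overflow case and saturates to UINT32_MAX, per the architecture.
 */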
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

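/*
 * VIS EDGE* insns: compute a mask of the valid byte/halfword/word
 * lanes between addresses s1 and s2 within an aligned 8-byte block,
 * for use with partial stores.  The cc forms additionally set the
 * condition codes from s1 - s2; the N forms (VIS2) do not.
 */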
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l : l & r) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)

static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)

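/*
 * Register-count shifts.  The count is masked to 5 or 6 bits; for the
 * 32-bit (non-X) forms the result is explicitly zero- or sign-extended
 * so that the full 64-bit register stays canonical on sparc64.
 */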
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)

static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)

static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}

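/*
 * Conditional moves (MOVcc/MOVfcc/MOVR): load the old value of %rd
 * and use movcond, so that %rd is written back unchanged when the
 * condition does not hold.
 */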
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}

static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}

static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)

/*
 * Major opcode 11 -- load and store instructions
 */

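/*
 * Compute the effective address rs1 + (rs2 or simm13), returning NULL
 * for an under-decoded rs2 field.  Under AM_CHECK (32-bit address
 * masking on sparc64) the result is zero-extended to 32 bits.
 */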
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}

static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)

static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)

static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

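/*
 * CASA/CASXA: the effective address is rs1 alone, the comparison value
 * comes from rs2, the swap value from %rd, and %rd receives the old
 * memory contents.
 */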
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)

static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)

static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}

static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}

static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    return advance_pc(dc);
#else
    return false;
#endif
}

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

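/*
 * FZEROs/FONEs write an all-zeros or all-ones single-precision value.
 * Single FP registers live in halves of the 64-bit cpu_fpr slots, so
 * only the selected 32-bit half is modified.
 */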
static bool do_fc(DisasContext *dc, int rd, bool c)
{
    uint64_t mask;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    if (rd & 1) {
        mask = MAKE_64BIT_MASK(0, 32);
    } else {
        mask = MAKE_64BIT_MASK(32, 32);
    }
    if (c) {
        tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
    } else {
        tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
    }
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

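/*
 * Workers for FP operations, named by operand size: f = 32-bit,
 * d = 64-bit, q = 128-bit, destination first (e.g. do_fd produces a
 * 32-bit result from a 64-bit source).  An "env" infix marks ops that
 * may raise FP exceptions and therefore need tcg_env.
 */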
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)

static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

4359 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4361 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4362 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4364 TCGv_i64 dst, src;
4366 if (gen_trap_ifnofpu(dc)) {
4367 return true;
4370 dst = gen_dest_fpr_D(dc, a->rd);
4371 src = gen_load_fpr_D(dc, a->rs);
4372 func(dst, tcg_env, src);
4373 gen_store_fpr_D(dc, a->rd, dst);
4374 return advance_pc(dc);
4377 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4378 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4379 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4381 static bool do_df(DisasContext *dc, arg_r_r *a,
4382 void (*func)(TCGv_i64, TCGv_i32))
4384 TCGv_i64 dst;
4385 TCGv_i32 src;
4387 if (gen_trap_ifnofpu(dc)) {
4388 return true;
4391 dst = tcg_temp_new_i64();
4392 src = gen_load_fpr_F(dc, a->rs);
4393 func(dst, src);
4394 gen_store_fpr_D(dc, a->rd, dst);
4395 return advance_pc(dc);
4398 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)

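/*
 * Quad-precision operations also check gen_trap_float128(), since the
 * 128-bit registers are a CPU feature that may be absent.
 */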
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)

static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

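/*
 * Three-operand 32-bit operations without env: the VIS partitioned
 * add/subtract and bitwise logical insns, none of which can trap.
 */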
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

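/* Three-operand 32-bit IEEE arithmetic: may trap, hence tcg_env. */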
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)

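/*
 * Two 32-bit sources producing one 64-bit result: the widening VIS
 * multiplies and FPMERGE.
 */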
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)

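/* Mixed 32/64-bit sources; only FMUL8x16 has this operand shape. */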
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)

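/*
 * Three-operand 64-bit operations without env: the remaining VIS
 * multiplies plus partitioned add/sub, logicals, pack and shuffle.
 */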
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

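/*
 * The VIS partitioned compares read two 64-bit float registers but
 * deposit their per-element result mask in a general register.
 */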
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)

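/* Three-operand 64-bit IEEE arithmetic: add, sub, mul, div. */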
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)

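/*
 * FsMULd multiplies two singles into an exact double-width product.
 * It is gated on CPU_FEATURE_FSMULD: CPUs without it report an
 * unimplemented FPop via raise_unimpfpop() rather than failing decode.
 */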
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

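/*
 * Four-operand form: rd is loaded as an extra source, which PDIST
 * needs because it accumulates its result into the destination.
 */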
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)

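/* Three-operand quad-precision IEEE arithmetic. */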
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

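/* FdMULq produces the exact quad-precision product of two doubles. */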
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

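/*
 * Conditional float moves: FMOVR tests an integer register condition,
 * FMOVcc an integer condition-code field, FMOVfcc a float condition
 * code; all three defer the register copy itself to gen_fmov[sdq].
 */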
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

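/*
 * Float compares write one of the %fcc fields directly.  Only %fcc0
 * exists before V9, hence the avail_32() restriction on a->cc.  The
 * 'e' variants (FCMPE*) also signal invalid on unordered operands.
 */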
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

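/*
 * Translator hooks: the TranslatorOps callbacks below drive decoding
 * of an entire translation block around the decodetree decode() core.
 */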
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start = sparc_tr_tb_start,
    .insn_start = sparc_tr_insn_start,
    .translate_insn = sparc_tr_translate_insn,
    .tb_stop = sparc_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

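/*
 * Register the TCG globals that back the CPU state.  Note fregnames
 * lists only even float registers: cpu_fpr[] models the FP file as
 * TARGET_DPREGS 64-bit values, each one an even/odd single pair.
 */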
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}

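/*
 * Recover pc/npc after a mid-TB exception from the values recorded by
 * sparc_tr_insn_start(); the low bits of npc distinguish the dynamic
 * and jump-PC pseudo values from a real next-PC.
 */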
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}