/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"


#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
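
/* Worked example (added comment, not in the original source): for a 32-bit
   instruction word, GET_FIELD(insn, 0, 1) yields the two most-significant
   bits (the SPARC "op" field), whereas GET_FIELD_SP(insn, 0, 1) yields the
   two least-significant bits, since GET_FIELD_SP counts from bit 0 = 2^0.  */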

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
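
/* Illustration (added comment): V9 doubles the FP register file and reuses
   the odd encoding bit to select the upper bank, so on TARGET_SPARC64
   DFPREG(1) == 32, i.e. an rd field of 1 names %f32; on 32-bit SPARC the
   low bit is simply masked off.  */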

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
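
/* Added note: the two bit values correspond to FPRS.DL (dirty lower,
   %f0-%f31) and FPRS.DU (dirty upper, %f32-%f63); caching them in
   dc->fprs_dirty avoids emitting a redundant OR for every FP write
   within the same TB.  */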

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = get_temp_i32(dc);
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    tcg_temp_free_i64(t);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
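
/* Added note: with PSTATE.AM set (or under a 32-bit ABI), a 64-bit CPU
   truncates every effective address to 32 bits, which is what the
   AND with 0xffffffff implements here.  */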

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(NULL, 0);
    }
}
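
/* Added note: the tcg_gen_goto_tb/tcg_gen_exit_tb(tb, n) pair lets the
   generated code be patched later to chain directly into the target TB,
   while the fallback tcg_gen_exit_tb(NULL, 0) always returns to the main
   loop, which then looks the next TB up from (pc, npc).  */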

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
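
/* Added note: this relies on the identity that for 32-bit operands the sum
   a + b wrapped to 32 bits is unsigned-less-than a exactly when the add
   carried out, i.e. carry = ((a + b) & 0xffffffff) < a.  */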

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
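
/* Added note: gen_op_addx_int implements the SPARC addx/addxcc semantics
   dst = src1 + src2 + icc.C, specializing on how the current condition
   codes were produced so the carry can usually be recovered inline
   instead of calling the generic compute_C_icc helper.  */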

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
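
/* Added note: MULScc is the V8 multiply-step instruction; software issues
   32 of these steps, shifting the multiplier through %y and accumulating
   partial sums, to perform a full 32x32 multiply on CPUs without a
   hardware multiplier.  */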

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
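
/* Added note: in both build variants the high 32 bits of the 64-bit
   product end up in %y, matching the architectural behaviour of
   umul/smul; on 32-bit targets mul[su]2 produces the low/high pair
   directly, while on 64-bit targets a single 64-bit multiply is
   shifted instead.  */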

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
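
/* Added note: the movcond above materializes the pending JUMP_PC state,
   i.e. npc = (cpu_cond != 0) ? jump_pc[0] : jump_pc[1], turning the
   two-valued symbolic npc back into a concrete register value.  */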

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };
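
    /* Added note: after a logical op the carry and overflow flags are
       architecturally clear, which is why e.g. "leu" (C | Z) reduces to a
       plain equality test and "ltu" (C) can never be true here.  */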

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
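
/* Added note: ldstub atomically loads a byte and sets the memory byte to
   all-ones; expressing it as an atomic exchange with the constant 0xff
   gives the same architectural effect in a single TCG operation.  */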

/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }

    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
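
/* Added example (illustrative, not from the original source): on v9 a
   "lda [%g1] ASI_PL, %o0" decodes here to GET_ASI_DIRECT with the memop
   byte-swapped, since ASI_PL is the little-endian primary ASI and has
   bit 3 set.  */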

static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
#endif
        /* fall through */
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_const_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }

            tcg_temp_free(saddr);
            tcg_temp_free(daddr);
            tcg_temp_free(four);
            tcg_temp_free_i32(tmp);
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2368 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2369 TCGv addr, int insn)
2371 DisasASI da = get_asi(dc, insn, MO_TEUL);
2373 switch (da.type) {
2374 case GET_ASI_EXCP:
2375 break;
2376 case GET_ASI_DIRECT:
2377 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2378 break;
2379 default:
2380 /* ??? Should be DAE_invalid_asi. */
2381 gen_exception(dc, TT_DATA_ACCESS);
2382 break;
2386 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2387 int insn, int rd)
2389 DisasASI da = get_asi(dc, insn, MO_TEUL);
2390 TCGv oldv;
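    /* casa: compare the word at [addr] with cmpv and, if they are equal,
       replace it with r[rd]; the old memory value is always returned in
       r[rd].  This maps onto a single atomic cmpxchg, in effect:
           oldv = atomic_cmpxchg(addr, cmpv, r[rd]); r[rd] = oldv;  */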
2392 switch (da.type) {
2393 case GET_ASI_EXCP:
2394 return;
2395 case GET_ASI_DIRECT:
2396 oldv = tcg_temp_new();
2397 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2398 da.mem_idx, da.memop);
2399 gen_store_gpr(dc, rd, oldv);
2400 tcg_temp_free(oldv);
2401 break;
2402 default:
2403 /* ??? Should be DAE_invalid_asi. */
2404 gen_exception(dc, TT_DATA_ACCESS);
2405 break;
2409 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2411 DisasASI da = get_asi(dc, insn, MO_UB);
2413 switch (da.type) {
2414 case GET_ASI_EXCP:
2415 break;
2416 case GET_ASI_DIRECT:
2417 gen_ldstub(dc, dst, addr, da.mem_idx);
2418 break;
2419 default:
2420 /* ??? In theory, this should raise DAE_invalid_asi.
2421 But the SS-20 ROMs do ldstuba [%l0] #ASI_M_CTL, %o1. */
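        /* The non-atomic fallback below emulates ldstub as a byte load via
           the ASI helper followed by a store of 0xff.  Since that pair of
           operations is not atomic, the CF_PARALLEL case has to bail out
           to exit_atomic instead. */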
2422 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2423 gen_helper_exit_atomic(cpu_env);
2424 } else {
2425 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2426 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2427 TCGv_i64 s64, t64;
2429 save_state(dc);
2430 t64 = tcg_temp_new_i64();
2431 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2433 s64 = tcg_const_i64(0xff);
2434 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2435 tcg_temp_free_i64(s64);
2436 tcg_temp_free_i32(r_mop);
2437 tcg_temp_free_i32(r_asi);
2439 tcg_gen_trunc_i64_tl(dst, t64);
2440 tcg_temp_free_i64(t64);
2442 /* End the TB. */
2443 dc->npc = DYNAMIC_PC;
2445 break;
2448 #endif
2450 #ifdef TARGET_SPARC64
2451 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2452 int insn, int size, int rd)
2454 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2455 TCGv_i32 d32;
2456 TCGv_i64 d64;
2458 switch (da.type) {
2459 case GET_ASI_EXCP:
2460 break;
2462 case GET_ASI_DIRECT:
2463 gen_address_mask(dc, addr);
2464 switch (size) {
2465 case 4:
2466 d32 = gen_dest_fpr_F(dc);
2467 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2468 gen_store_fpr_F(dc, rd, d32);
2469 break;
2470 case 8:
2471 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2472 da.memop | MO_ALIGN_4);
2473 break;
2474 case 16:
2475 d64 = tcg_temp_new_i64();
2476 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2477 tcg_gen_addi_tl(addr, addr, 8);
2478 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2479 da.memop | MO_ALIGN_4);
2480 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2481 tcg_temp_free_i64(d64);
2482 break;
2483 default:
2484 g_assert_not_reached();
2486 break;
2488 case GET_ASI_BLOCK:
2489 /* Valid for lddfa on aligned registers only. */
2490 if (size == 8 && (rd & 7) == 0) {
2491 MemOp memop;
2492 TCGv eight;
2493 int i;
2495 gen_address_mask(dc, addr);
2497 /* The first operation checks required alignment. */
2498 memop = da.memop | MO_ALIGN_64;
2499 eight = tcg_const_tl(8);
2500 for (i = 0; ; ++i) {
2501 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2502 da.mem_idx, memop);
2503 if (i == 7) {
2504 break;
2506 tcg_gen_add_tl(addr, addr, eight);
2507 memop = da.memop;
2509 tcg_temp_free(eight);
2510 } else {
2511 gen_exception(dc, TT_ILL_INSN);
2513 break;
2515 case GET_ASI_SHORT:
2516 /* Valid for lddfa only. */
2517 if (size == 8) {
2518 gen_address_mask(dc, addr);
2519 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2520 } else {
2521 gen_exception(dc, TT_ILL_INSN);
2523 break;
2525 default:
2527 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2528 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2530 save_state(dc);
2531 /* According to the table in the UA2011 manual, the only
2532 other asis that are valid for ldfa/lddfa/ldqfa are
2533 the NO_FAULT asis. We still need a helper for these,
2534 but we can just use the integer asi helper for them. */
2535 switch (size) {
2536 case 4:
2537 d64 = tcg_temp_new_i64();
2538 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2539 d32 = gen_dest_fpr_F(dc);
2540 tcg_gen_extrl_i64_i32(d32, d64);
2541 tcg_temp_free_i64(d64);
2542 gen_store_fpr_F(dc, rd, d32);
2543 break;
2544 case 8:
2545 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2546 break;
2547 case 16:
2548 d64 = tcg_temp_new_i64();
2549 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2550 tcg_gen_addi_tl(addr, addr, 8);
2551 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2552 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2553 tcg_temp_free_i64(d64);
2554 break;
2555 default:
2556 g_assert_not_reached();
2558 tcg_temp_free_i32(r_mop);
2559 tcg_temp_free_i32(r_asi);
2561 break;
2565 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2566 int insn, int size, int rd)
2568 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2569 TCGv_i32 d32;
2571 switch (da.type) {
2572 case GET_ASI_EXCP:
2573 break;
2575 case GET_ASI_DIRECT:
2576 gen_address_mask(dc, addr);
2577 switch (size) {
2578 case 4:
2579 d32 = gen_load_fpr_F(dc, rd);
2580 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2581 break;
2582 case 8:
2583 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2584 da.memop | MO_ALIGN_4);
2585 break;
2586 case 16:
2587 /* Only 4-byte alignment required. However, it is legal for the
2588 cpu to signal the alignment fault, and the OS trap handler is
2589 required to fix it up. Requiring 16-byte alignment here avoids
2590 having to probe the second page before performing the first
2591 write. */
2592 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2593 da.memop | MO_ALIGN_16);
2594 tcg_gen_addi_tl(addr, addr, 8);
2595 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2596 break;
2597 default:
2598 g_assert_not_reached();
2600 break;
2602 case GET_ASI_BLOCK:
2603 /* Valid for stdfa on aligned registers only. */
2604 if (size == 8 && (rd & 7) == 0) {
2605 MemOp memop;
2606 TCGv eight;
2607 int i;
2609 gen_address_mask(dc, addr);
2611 /* The first operation checks required alignment. */
2612 memop = da.memop | MO_ALIGN_64;
2613 eight = tcg_const_tl(8);
2614 for (i = 0; ; ++i) {
2615 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2616 da.mem_idx, memop);
2617 if (i == 7) {
2618 break;
2620 tcg_gen_add_tl(addr, addr, eight);
2621 memop = da.memop;
2623 tcg_temp_free(eight);
2624 } else {
2625 gen_exception(dc, TT_ILL_INSN);
2627 break;
2629 case GET_ASI_SHORT:
2630 /* Valid for stdfa only. */
2631 if (size == 8) {
2632 gen_address_mask(dc, addr);
2633 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2634 } else {
2635 gen_exception(dc, TT_ILL_INSN);
2637 break;
2639 default:
2640 /* According to the table in the UA2011 manual, the only
2641 other asis that are valid for stfa/stdfa/stqfa are
2642 the PST* asis, which aren't currently handled. */
2643 gen_exception(dc, TT_ILL_INSN);
2644 break;
2648 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2650 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2651 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2652 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2654 switch (da.type) {
2655 case GET_ASI_EXCP:
2656 return;
2658 case GET_ASI_DTWINX:
2659 gen_address_mask(dc, addr);
2660 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2661 tcg_gen_addi_tl(addr, addr, 8);
2662 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2663 break;
2665 case GET_ASI_DIRECT:
2667 TCGv_i64 tmp = tcg_temp_new_i64();
2669 gen_address_mask(dc, addr);
2670 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2672 /* Note that LE ldda acts as if each 32-bit register
2673 result is byte swapped. Having just performed one
2674 64-bit bswap, we now need to swap the writebacks. */
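        /* Concretely, for memory bytes b0..b7: the big-endian case wants
           rd = b0b1b2b3, the high half of the 64-bit value, while the
           little-endian case wants rd = b3b2b1b0, which is the low half of
           the byte-swapped 64-bit load, hence the swapped argument order
           to the extr32 calls below. */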
2675 if ((da.memop & MO_BSWAP) == MO_TE) {
2676 tcg_gen_extr32_i64(lo, hi, tmp);
2677 } else {
2678 tcg_gen_extr32_i64(hi, lo, tmp);
2680 tcg_temp_free_i64(tmp);
2682 break;
2684 default:
2685 /* ??? In theory we've handled all of the ASIs that are valid
2686 for ldda, and this should raise DAE_invalid_asi. However,
2687 real hardware allows others. This can be seen with e.g.
2688 FreeBSD 10.3 wrt ASI_IC_TAG. */
2690 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2691 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2692 TCGv_i64 tmp = tcg_temp_new_i64();
2694 save_state(dc);
2695 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2696 tcg_temp_free_i32(r_asi);
2697 tcg_temp_free_i32(r_mop);
2699 /* See above. */
2700 if ((da.memop & MO_BSWAP) == MO_TE) {
2701 tcg_gen_extr32_i64(lo, hi, tmp);
2702 } else {
2703 tcg_gen_extr32_i64(hi, lo, tmp);
2705 tcg_temp_free_i64(tmp);
2707 break;
2710 gen_store_gpr(dc, rd, hi);
2711 gen_store_gpr(dc, rd + 1, lo);
2714 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2715 int insn, int rd)
2717 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2718 TCGv lo = gen_load_gpr(dc, rd + 1);
2720 switch (da.type) {
2721 case GET_ASI_EXCP:
2722 break;
2724 case GET_ASI_DTWINX:
2725 gen_address_mask(dc, addr);
2726 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2727 tcg_gen_addi_tl(addr, addr, 8);
2728 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2729 break;
2731 case GET_ASI_DIRECT:
2733 TCGv_i64 t64 = tcg_temp_new_i64();
2735 /* Note that LE stda acts as if each 32-bit register result is
2736 byte swapped. We will perform one 64-bit LE store, so now
2737 we must swap the order of the construction. */
2738 if ((da.memop & MO_BSWAP) == MO_TE) {
2739 tcg_gen_concat32_i64(t64, lo, hi);
2740 } else {
2741 tcg_gen_concat32_i64(t64, hi, lo);
2743 gen_address_mask(dc, addr);
2744 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2745 tcg_temp_free_i64(t64);
2747 break;
2749 default:
2750 /* ??? In theory we've handled all of the ASIs that are valid
2751 for stda, and this should raise DAE_invalid_asi. */
2753 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2754 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2755 TCGv_i64 t64 = tcg_temp_new_i64();
2757 /* See above. */
2758 if ((da.memop & MO_BSWAP) == MO_TE) {
2759 tcg_gen_concat32_i64(t64, lo, hi);
2760 } else {
2761 tcg_gen_concat32_i64(t64, hi, lo);
2764 save_state(dc);
2765 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2766 tcg_temp_free_i32(r_mop);
2767 tcg_temp_free_i32(r_asi);
2768 tcg_temp_free_i64(t64);
2770 break;
2774 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2775 int insn, int rd)
2777 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2778 TCGv oldv;
2780 switch (da.type) {
2781 case GET_ASI_EXCP:
2782 return;
2783 case GET_ASI_DIRECT:
2784 oldv = tcg_temp_new();
2785 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2786 da.mem_idx, da.memop);
2787 gen_store_gpr(dc, rd, oldv);
2788 tcg_temp_free(oldv);
2789 break;
2790 default:
2791 /* ??? Should be DAE_invalid_asi. */
2792 gen_exception(dc, TT_DATA_ACCESS);
2793 break;
2797 #elif !defined(CONFIG_USER_ONLY)
2798 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2800 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2801 whereby "rd + 1" elicits "error: array subscript is above array".
2802 Since we have already asserted that rd is even, the semantics
2803 are unchanged. */
2804 TCGv lo = gen_dest_gpr(dc, rd | 1);
2805 TCGv hi = gen_dest_gpr(dc, rd);
2806 TCGv_i64 t64 = tcg_temp_new_i64();
2807 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2809 switch (da.type) {
2810 case GET_ASI_EXCP:
2811 tcg_temp_free_i64(t64);
2812 return;
2813 case GET_ASI_DIRECT:
2814 gen_address_mask(dc, addr);
2815 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2816 break;
2817 default:
2819 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2820 TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
2822 save_state(dc);
2823 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2824 tcg_temp_free_i32(r_mop);
2825 tcg_temp_free_i32(r_asi);
2827 break;
2830 tcg_gen_extr_i64_i32(lo, hi, t64);
2831 tcg_temp_free_i64(t64);
2832 gen_store_gpr(dc, rd | 1, lo);
2833 gen_store_gpr(dc, rd, hi);
2836 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2837 int insn, int rd)
2839 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2840 TCGv lo = gen_load_gpr(dc, rd + 1);
2841 TCGv_i64 t64 = tcg_temp_new_i64();
2843 tcg_gen_concat_tl_i64(t64, lo, hi);
2845 switch (da.type) {
2846 case GET_ASI_EXCP:
2847 break;
2848 case GET_ASI_DIRECT:
2849 gen_address_mask(dc, addr);
2850 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2851 break;
2852 case GET_ASI_BFILL:
2853 /* Store 32 bytes of T64 to ADDR. */
2854 /* ??? The original qemu code suggests 8-byte alignment, dropping
2855 the low bits, but the only place I can see this used is in the
2856 Linux kernel with 32 byte alignment, which would make more sense
2857 as a cacheline-style operation. */
2859 TCGv d_addr = tcg_temp_new();
2860 TCGv eight = tcg_const_tl(8);
2861 int i;
2863 tcg_gen_andi_tl(d_addr, addr, -8);
2864 for (i = 0; i < 32; i += 8) {
2865 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2866 tcg_gen_add_tl(d_addr, d_addr, eight);
2869 tcg_temp_free(d_addr);
2870 tcg_temp_free(eight);
2872 break;
2873 default:
2875 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2876 TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
2878 save_state(dc);
2879 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2880 tcg_temp_free_i32(r_mop);
2881 tcg_temp_free_i32(r_asi);
2883 break;
2886 tcg_temp_free_i64(t64);
2888 #endif
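/* Operand decode for format-3 instructions: rs1 lives in insn bits 18..14
   (fields 13..17 in the MSB-first GET_FIELD numbering used here).  When the
   i bit is set, the second operand is the sign-extended 13-bit immediate;
   otherwise it is the register named by rs2 in bits 4..0. */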
2890 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2892 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2893 return gen_load_gpr(dc, rs1);
2896 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2898 if (IS_IMM) { /* immediate */
2899 target_long simm = GET_FIELDs(insn, 19, 31);
2900 TCGv t = get_temp_tl(dc);
2901 tcg_gen_movi_tl(t, simm);
2902 return t;
2903 } else { /* register */
2904 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2905 return gen_load_gpr(dc, rs2);
2909 #ifdef TARGET_SPARC64
2910 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2912 TCGv_i32 c32, zero, dst, s1, s2;
2914 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2915 or fold the comparison down to 32 bits and use movcond_i32. Choose
2916 the latter. */
2917 c32 = tcg_temp_new_i32();
2918 if (cmp->is_bool) {
2919 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2920 } else {
2921 TCGv_i64 c64 = tcg_temp_new_i64();
2922 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2923 tcg_gen_extrl_i64_i32(c32, c64);
2924 tcg_temp_free_i64(c64);
2927 s1 = gen_load_fpr_F(dc, rs);
2928 s2 = gen_load_fpr_F(dc, rd);
2929 dst = gen_dest_fpr_F(dc);
2930 zero = tcg_const_i32(0);
2932 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2934 tcg_temp_free_i32(c32);
2935 tcg_temp_free_i32(zero);
2936 gen_store_fpr_F(dc, rd, dst);
2939 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2941 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2942 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2943 gen_load_fpr_D(dc, rs),
2944 gen_load_fpr_D(dc, rd));
2945 gen_store_fpr_D(dc, rd, dst);
2948 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2950 int qd = QFPREG(rd);
2951 int qs = QFPREG(rs);
2953 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2954 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2955 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2956 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2958 gen_update_fprs_dirty(dc, qd);
2961 #ifndef CONFIG_USER_ONLY
2962 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2964 TCGv_i32 r_tl = tcg_temp_new_i32();
2966 /* load env->tl into r_tl */
2967 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2969 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2970 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2972 /* calculate offset to current trap state from env->ts, reuse r_tl */
2973 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2974 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2976 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2978 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2979 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2980 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2981 tcg_temp_free_ptr(r_tl_tmp);
2984 tcg_temp_free_i32(r_tl);
2986 #endif
2988 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2989 int width, bool cc, bool left)
2991 TCGv lo1, lo2, t1, t2;
2992 uint64_t amask, tabl, tabr;
2993 int shift, imask, omask;
2995 if (cc) {
2996 tcg_gen_mov_tl(cpu_cc_src, s1);
2997 tcg_gen_mov_tl(cpu_cc_src2, s2);
2998 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2999 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3000 dc->cc_op = CC_OP_SUB;
3003 /* Theory of operation: there are two tables, left and right (not to
3004 be confused with the left and right versions of the opcode). These
3005 are indexed by the low 3 bits of the inputs. To make things "easy",
3006 these tables are loaded into two constants, TABL and TABR below.
3007 The operation index = (input & imask) << shift calculates the index
3008 into the constant, while val = (table >> index) & omask calculates
3009 the value we're looking for. */
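    /* Worked example (derived from the table constants below, not from
       the manual): for edge8 with the "left" tables, s1 & 7 == 1 gives
       index 8 and (tabl >> 8) & 0xff == 0xfe; if s2 & 7 == 5 then
       (tabr >> 40) & 0xff == 0x3f, and when s1 and s2 fall in different
       aligned 8-byte blocks the result is 0xfe & 0x3f == 0x3e. */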
3010 switch (width) {
3011 case 8:
3012 imask = 0x7;
3013 shift = 3;
3014 omask = 0xff;
3015 if (left) {
3016 tabl = 0x80c0e0f0f8fcfeffULL;
3017 tabr = 0xff7f3f1f0f070301ULL;
3018 } else {
3019 tabl = 0x0103070f1f3f7fffULL;
3020 tabr = 0xfffefcf8f0e0c080ULL;
3022 break;
3023 case 16:
3024 imask = 0x6;
3025 shift = 1;
3026 omask = 0xf;
3027 if (left) {
3028 tabl = 0x8cef;
3029 tabr = 0xf731;
3030 } else {
3031 tabl = 0x137f;
3032 tabr = 0xfec8;
3034 break;
3035 case 32:
3036 imask = 0x4;
3037 shift = 0;
3038 omask = 0x3;
3039 if (left) {
3040 tabl = (2 << 2) | 3;
3041 tabr = (3 << 2) | 1;
3042 } else {
3043 tabl = (1 << 2) | 3;
3044 tabr = (3 << 2) | 2;
3046 break;
3047 default:
3048 abort();
3051 lo1 = tcg_temp_new();
3052 lo2 = tcg_temp_new();
3053 tcg_gen_andi_tl(lo1, s1, imask);
3054 tcg_gen_andi_tl(lo2, s2, imask);
3055 tcg_gen_shli_tl(lo1, lo1, shift);
3056 tcg_gen_shli_tl(lo2, lo2, shift);
3058 t1 = tcg_const_tl(tabl);
3059 t2 = tcg_const_tl(tabr);
3060 tcg_gen_shr_tl(lo1, t1, lo1);
3061 tcg_gen_shr_tl(lo2, t2, lo2);
3062 tcg_gen_andi_tl(dst, lo1, omask);
3063 tcg_gen_andi_tl(lo2, lo2, omask);
3065 amask = -8;
3066 if (AM_CHECK(dc)) {
3067 amask &= 0xffffffffULL;
3069 tcg_gen_andi_tl(s1, s1, amask);
3070 tcg_gen_andi_tl(s2, s2, amask);
3072 /* We want to compute
3073 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3074 We've already done dst = lo1, so this reduces to
3075 dst &= (s1 == s2 ? -1 : lo2)
3076 Which we perform by
3077 lo2 |= -(s1 == s2)
3078 dst &= lo2 */
3080 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3081 tcg_gen_neg_tl(t1, t1);
3082 tcg_gen_or_tl(lo2, lo2, t1);
3083 tcg_gen_and_tl(dst, dst, lo2);
3085 tcg_temp_free(lo1);
3086 tcg_temp_free(lo2);
3087 tcg_temp_free(t1);
3088 tcg_temp_free(t2);
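/* alignaddr: dst = (s1 + s2) & ~7, with the low three bits of the sum
   (negated when the caller passes left, apparently the alignaddrl form)
   deposited into GSR.align for use by faligndata below. */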
3091 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3093 TCGv tmp = tcg_temp_new();
3095 tcg_gen_add_tl(tmp, s1, s2);
3096 tcg_gen_andi_tl(dst, tmp, -8);
3097 if (left) {
3098 tcg_gen_neg_tl(tmp, tmp);
3100 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3102 tcg_temp_free(tmp);
3105 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3107 TCGv t1, t2, shift;
3109 t1 = tcg_temp_new();
3110 t2 = tcg_temp_new();
3111 shift = tcg_temp_new();
3113 tcg_gen_andi_tl(shift, gsr, 7);
3114 tcg_gen_shli_tl(shift, shift, 3);
3115 tcg_gen_shl_tl(t1, s1, shift);
3117 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3118 shift of (up to 63) followed by a constant shift of 1. */
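    /* shift = (GSR.align & 7) * 8 is a multiple of 8 in [0, 56], so
       shift ^ 63 == 63 - shift, and the two shifts below compute
       s2 >> (63 - shift) >> 1 == s2 >> (64 - shift), correctly giving 0
       when the alignment is 0. */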
3119 tcg_gen_xori_tl(shift, shift, 63);
3120 tcg_gen_shr_tl(t2, s2, shift);
3121 tcg_gen_shri_tl(t2, t2, 1);
3123 tcg_gen_or_tl(dst, t1, t2);
3125 tcg_temp_free(t1);
3126 tcg_temp_free(t2);
3127 tcg_temp_free(shift);
3129 #endif
3131 #define CHECK_IU_FEATURE(dc, FEATURE) \
3132 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3133 goto illegal_insn;
3134 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3135 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3136 goto nfpu_insn;
3138 /* before an instruction, dc->pc must be static */
3139 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3141 unsigned int opc, rs1, rs2, rd;
3142 TCGv cpu_src1, cpu_src2;
3143 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3144 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3145 target_long simm;
3147 opc = GET_FIELD(insn, 0, 1);
3148 rd = GET_FIELD(insn, 2, 6);
3150 switch (opc) {
3151 case 0: /* branches/sethi */
3153 unsigned int xop = GET_FIELD(insn, 7, 9);
3154 int32_t target;
3155 switch (xop) {
3156 #ifdef TARGET_SPARC64
3157 case 0x1: /* V9 BPcc */
3159 int cc;
3161 target = GET_FIELD_SP(insn, 0, 18);
3162 target = sign_extend(target, 19);
3163 target <<= 2;
3164 cc = GET_FIELD_SP(insn, 20, 21);
3165 if (cc == 0)
3166 do_branch(dc, target, insn, 0);
3167 else if (cc == 2)
3168 do_branch(dc, target, insn, 1);
3169 else
3170 goto illegal_insn;
3171 goto jmp_insn;
3173 case 0x3: /* V9 BPr */
3175 target = GET_FIELD_SP(insn, 0, 13) |
3176 (GET_FIELD_SP(insn, 20, 21) << 14);
3177 target = sign_extend(target, 16);
3178 target <<= 2;
3179 cpu_src1 = get_src1(dc, insn);
3180 do_branch_reg(dc, target, insn, cpu_src1);
3181 goto jmp_insn;
3183 case 0x5: /* V9 FBPcc */
3185 int cc = GET_FIELD_SP(insn, 20, 21);
3186 if (gen_trap_ifnofpu(dc)) {
3187 goto jmp_insn;
3189 target = GET_FIELD_SP(insn, 0, 18);
3190 target = sign_extend(target, 19);
3191 target <<= 2;
3192 do_fbranch(dc, target, insn, cc);
3193 goto jmp_insn;
3195 #else
3196 case 0x7: /* CBN+x */
3198 goto ncp_insn;
3200 #endif
3201 case 0x2: /* BN+x */
3203 target = GET_FIELD(insn, 10, 31);
3204 target = sign_extend(target, 22);
3205 target <<= 2;
3206 do_branch(dc, target, insn, 0);
3207 goto jmp_insn;
3209 case 0x6: /* FBN+x */
3211 if (gen_trap_ifnofpu(dc)) {
3212 goto jmp_insn;
3214 target = GET_FIELD(insn, 10, 31);
3215 target = sign_extend(target, 22);
3216 target <<= 2;
3217 do_fbranch(dc, target, insn, 0);
3218 goto jmp_insn;
3220 case 0x4: /* SETHI */
3221 /* Special-case %g0 because that's the canonical nop. */
3222 if (rd) {
3223 uint32_t value = GET_FIELD(insn, 10, 31);
3224 TCGv t = gen_dest_gpr(dc, rd);
3225 tcg_gen_movi_tl(t, value << 10);
3226 gen_store_gpr(dc, rd, t);
3228 break;
3229 case 0x0: /* UNIMPL */
3230 default:
3231 goto illegal_insn;
3233 break;
3235 break;
3236 case 1: /*CALL*/
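        /* CALL deposits the address of the call itself (dc->pc here) into
           %o7 (r15) and branches to PC + (sign-extended disp30 << 2),
           truncated to 32 bits when the address mask is in effect. */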
3238 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3239 TCGv o7 = gen_dest_gpr(dc, 15);
3241 tcg_gen_movi_tl(o7, dc->pc);
3242 gen_store_gpr(dc, 15, o7);
3243 target += dc->pc;
3244 gen_mov_pc_npc(dc);
3245 #ifdef TARGET_SPARC64
3246 if (unlikely(AM_CHECK(dc))) {
3247 target &= 0xffffffffULL;
3249 #endif
3250 dc->npc = target;
3252 goto jmp_insn;
3253 case 2: /* FPU & Logical Operations */
3255 unsigned int xop = GET_FIELD(insn, 7, 12);
3256 TCGv cpu_dst = get_temp_tl(dc);
3257 TCGv cpu_tmp0;
3259 if (xop == 0x3a) { /* generate trap */
3260 int cond = GET_FIELD(insn, 3, 6);
3261 TCGv_i32 trap;
3262 TCGLabel *l1 = NULL;
3263 int mask;
3265 if (cond == 0) {
3266 /* Trap never. */
3267 break;
3270 save_state(dc);
3272 if (cond != 8) {
3273 /* Conditional trap. */
3274 DisasCompare cmp;
3275 #ifdef TARGET_SPARC64
3276 /* V9 icc/xcc */
3277 int cc = GET_FIELD_SP(insn, 11, 12);
3278 if (cc == 0) {
3279 gen_compare(&cmp, 0, cond, dc);
3280 } else if (cc == 2) {
3281 gen_compare(&cmp, 1, cond, dc);
3282 } else {
3283 goto illegal_insn;
3285 #else
3286 gen_compare(&cmp, 0, cond, dc);
3287 #endif
3288 l1 = gen_new_label();
3289 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3290 cmp.c1, cmp.c2, l1);
3291 free_compare(&cmp);
3294 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3295 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3297 /* Don't use the normal temporaries, as they may well have
3298 gone out of scope with the branch above. While we're
3299 doing that we might as well pre-truncate to 32-bit. */
3300 trap = tcg_temp_new_i32();
3302 rs1 = GET_FIELD_SP(insn, 14, 18);
3303 if (IS_IMM) {
3304 rs2 = GET_FIELD_SP(insn, 0, 7);
3305 if (rs1 == 0) {
3306 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3307 /* Signal that the trap value is fully constant. */
3308 mask = 0;
3309 } else {
3310 TCGv t1 = gen_load_gpr(dc, rs1);
3311 tcg_gen_trunc_tl_i32(trap, t1);
3312 tcg_gen_addi_i32(trap, trap, rs2);
3314 } else {
3315 TCGv t1, t2;
3316 rs2 = GET_FIELD_SP(insn, 0, 4);
3317 t1 = gen_load_gpr(dc, rs1);
3318 t2 = gen_load_gpr(dc, rs2);
3319 tcg_gen_add_tl(t1, t1, t2);
3320 tcg_gen_trunc_tl_i32(trap, t1);
3322 if (mask != 0) {
3323 tcg_gen_andi_i32(trap, trap, mask);
3324 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3327 gen_helper_raise_exception(cpu_env, trap);
3328 tcg_temp_free_i32(trap);
3330 if (cond == 8) {
3331 /* An unconditional trap ends the TB. */
3332 dc->base.is_jmp = DISAS_NORETURN;
3333 goto jmp_insn;
3334 } else {
3335 /* A conditional trap falls through to the next insn. */
3336 gen_set_label(l1);
3337 break;
3339 } else if (xop == 0x28) {
3340 rs1 = GET_FIELD(insn, 13, 17);
3341 switch(rs1) {
3342 case 0: /* rdy */
3343 #ifndef TARGET_SPARC64
3344 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3345 manual, rdy on the microSPARC
3346 II */
3347 case 0x0f: /* stbar in the SPARCv8 manual,
3348 rdy on the microSPARC II */
3349 case 0x10 ... 0x1f: /* implementation-dependent in the
3350 SPARCv8 manual, rdy on the
3351 microSPARC II */
3352 /* Read Asr17 */
3353 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3354 TCGv t = gen_dest_gpr(dc, rd);
3355 /* Read Asr17 for a Leon3 monoprocessor */
3356 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3357 gen_store_gpr(dc, rd, t);
3358 break;
3360 #endif
3361 gen_store_gpr(dc, rd, cpu_y);
3362 break;
3363 #ifdef TARGET_SPARC64
3364 case 0x2: /* V9 rdccr */
3365 update_psr(dc);
3366 gen_helper_rdccr(cpu_dst, cpu_env);
3367 gen_store_gpr(dc, rd, cpu_dst);
3368 break;
3369 case 0x3: /* V9 rdasi */
3370 tcg_gen_movi_tl(cpu_dst, dc->asi);
3371 gen_store_gpr(dc, rd, cpu_dst);
3372 break;
3373 case 0x4: /* V9 rdtick */
3375 TCGv_ptr r_tickptr;
3376 TCGv_i32 r_const;
3378 r_tickptr = tcg_temp_new_ptr();
3379 r_const = tcg_const_i32(dc->mem_idx);
3380 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3381 offsetof(CPUSPARCState, tick));
3382 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3383 gen_io_start();
3385 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3386 r_const);
3387 tcg_temp_free_ptr(r_tickptr);
3388 tcg_temp_free_i32(r_const);
3389 gen_store_gpr(dc, rd, cpu_dst);
3390 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3391 /* I/O operations in icount mode must end the TB */
3392 dc->base.is_jmp = DISAS_EXIT;
3395 break;
3396 case 0x5: /* V9 rdpc */
3398 TCGv t = gen_dest_gpr(dc, rd);
3399 if (unlikely(AM_CHECK(dc))) {
3400 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3401 } else {
3402 tcg_gen_movi_tl(t, dc->pc);
3404 gen_store_gpr(dc, rd, t);
3406 break;
3407 case 0x6: /* V9 rdfprs */
3408 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3409 gen_store_gpr(dc, rd, cpu_dst);
3410 break;
3411 case 0xf: /* V9 membar */
3412 break; /* no effect */
3413 case 0x13: /* Graphics Status */
3414 if (gen_trap_ifnofpu(dc)) {
3415 goto jmp_insn;
3417 gen_store_gpr(dc, rd, cpu_gsr);
3418 break;
3419 case 0x16: /* Softint */
3420 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3421 offsetof(CPUSPARCState, softint));
3422 gen_store_gpr(dc, rd, cpu_dst);
3423 break;
3424 case 0x17: /* Tick compare */
3425 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3426 break;
3427 case 0x18: /* System tick */
3429 TCGv_ptr r_tickptr;
3430 TCGv_i32 r_const;
3432 r_tickptr = tcg_temp_new_ptr();
3433 r_const = tcg_const_i32(dc->mem_idx);
3434 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3435 offsetof(CPUSPARCState, stick));
3436 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3437 gen_io_start();
3439 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3440 r_const);
3441 tcg_temp_free_ptr(r_tickptr);
3442 tcg_temp_free_i32(r_const);
3443 gen_store_gpr(dc, rd, cpu_dst);
3444 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3445 /* I/O operations in icount mode must end the TB */
3446 dc->base.is_jmp = DISAS_EXIT;
3449 break;
3450 case 0x19: /* System tick compare */
3451 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3452 break;
3453 case 0x1a: /* UltraSPARC-T1 Strand status */
3454 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3455 * this ASR as impl. dep */
3457 CHECK_IU_FEATURE(dc, HYPV);
3459 TCGv t = gen_dest_gpr(dc, rd);
3460 tcg_gen_movi_tl(t, 1UL);
3461 gen_store_gpr(dc, rd, t);
3463 break;
3464 case 0x10: /* Performance Control */
3465 case 0x11: /* Performance Instrumentation Counter */
3466 case 0x12: /* Dispatch Control */
3467 case 0x14: /* Softint set, WO */
3468 case 0x15: /* Softint clear, WO */
3469 #endif
3470 default:
3471 goto illegal_insn;
3473 #if !defined(CONFIG_USER_ONLY)
3474 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3475 #ifndef TARGET_SPARC64
3476 if (!supervisor(dc)) {
3477 goto priv_insn;
3479 update_psr(dc);
3480 gen_helper_rdpsr(cpu_dst, cpu_env);
3481 #else
3482 CHECK_IU_FEATURE(dc, HYPV);
3483 if (!hypervisor(dc))
3484 goto priv_insn;
3485 rs1 = GET_FIELD(insn, 13, 17);
3486 switch (rs1) {
3487 case 0: // hpstate
3488 tcg_gen_ld_i64(cpu_dst, cpu_env,
3489 offsetof(CPUSPARCState, hpstate));
3490 break;
3491 case 1: // htstate
3492 // gen_op_rdhtstate();
3493 break;
3494 case 3: // hintp
3495 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3496 break;
3497 case 5: // htba
3498 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3499 break;
3500 case 6: // hver
3501 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3502 break;
3503 case 31: // hstick_cmpr
3504 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3505 break;
3506 default:
3507 goto illegal_insn;
3509 #endif
3510 gen_store_gpr(dc, rd, cpu_dst);
3511 break;
3512 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3513 if (!supervisor(dc)) {
3514 goto priv_insn;
3516 cpu_tmp0 = get_temp_tl(dc);
3517 #ifdef TARGET_SPARC64
3518 rs1 = GET_FIELD(insn, 13, 17);
3519 switch (rs1) {
3520 case 0: // tpc
3522 TCGv_ptr r_tsptr;
3524 r_tsptr = tcg_temp_new_ptr();
3525 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3526 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3527 offsetof(trap_state, tpc));
3528 tcg_temp_free_ptr(r_tsptr);
3530 break;
3531 case 1: // tnpc
3533 TCGv_ptr r_tsptr;
3535 r_tsptr = tcg_temp_new_ptr();
3536 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3537 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3538 offsetof(trap_state, tnpc));
3539 tcg_temp_free_ptr(r_tsptr);
3541 break;
3542 case 2: // tstate
3544 TCGv_ptr r_tsptr;
3546 r_tsptr = tcg_temp_new_ptr();
3547 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3548 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3549 offsetof(trap_state, tstate));
3550 tcg_temp_free_ptr(r_tsptr);
3552 break;
3553 case 3: // tt
3555 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3557 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3558 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3559 offsetof(trap_state, tt));
3560 tcg_temp_free_ptr(r_tsptr);
3562 break;
3563 case 4: // tick
3565 TCGv_ptr r_tickptr;
3566 TCGv_i32 r_const;
3568 r_tickptr = tcg_temp_new_ptr();
3569 r_const = tcg_const_i32(dc->mem_idx);
3570 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3571 offsetof(CPUSPARCState, tick));
3572 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3573 gen_io_start();
3575 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3576 r_tickptr, r_const);
3577 tcg_temp_free_ptr(r_tickptr);
3578 tcg_temp_free_i32(r_const);
3579 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3580 /* I/O operations in icount mode must end the TB */
3581 dc->base.is_jmp = DISAS_EXIT;
3584 break;
3585 case 5: // tba
3586 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3587 break;
3588 case 6: // pstate
3589 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3590 offsetof(CPUSPARCState, pstate));
3591 break;
3592 case 7: // tl
3593 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3594 offsetof(CPUSPARCState, tl));
3595 break;
3596 case 8: // pil
3597 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3598 offsetof(CPUSPARCState, psrpil));
3599 break;
3600 case 9: // cwp
3601 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3602 break;
3603 case 10: // cansave
3604 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3605 offsetof(CPUSPARCState, cansave));
3606 break;
3607 case 11: // canrestore
3608 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3609 offsetof(CPUSPARCState, canrestore));
3610 break;
3611 case 12: // cleanwin
3612 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3613 offsetof(CPUSPARCState, cleanwin));
3614 break;
3615 case 13: // otherwin
3616 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3617 offsetof(CPUSPARCState, otherwin));
3618 break;
3619 case 14: // wstate
3620 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3621 offsetof(CPUSPARCState, wstate));
3622 break;
3623 case 16: // UA2005 gl
3624 CHECK_IU_FEATURE(dc, GL);
3625 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3626 offsetof(CPUSPARCState, gl));
3627 break;
3628 case 26: // UA2005 strand status
3629 CHECK_IU_FEATURE(dc, HYPV);
3630 if (!hypervisor(dc))
3631 goto priv_insn;
3632 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3633 break;
3634 case 31: // ver
3635 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3636 break;
3637 case 15: // fq
3638 default:
3639 goto illegal_insn;
3641 #else
3642 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3643 #endif
3644 gen_store_gpr(dc, rd, cpu_tmp0);
3645 break;
3646 #endif
3647 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3648 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3649 #ifdef TARGET_SPARC64
3650 gen_helper_flushw(cpu_env);
3651 #else
3652 if (!supervisor(dc))
3653 goto priv_insn;
3654 gen_store_gpr(dc, rd, cpu_tbr);
3655 #endif
3656 break;
3657 #endif
3658 } else if (xop == 0x34) { /* FPU Operations */
3659 if (gen_trap_ifnofpu(dc)) {
3660 goto jmp_insn;
3662 gen_op_clear_ieee_excp_and_FTT();
3663 rs1 = GET_FIELD(insn, 13, 17);
3664 rs2 = GET_FIELD(insn, 27, 31);
3665 xop = GET_FIELD(insn, 18, 26);
3667 switch (xop) {
3668 case 0x1: /* fmovs */
3669 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3670 gen_store_fpr_F(dc, rd, cpu_src1_32);
3671 break;
3672 case 0x5: /* fnegs */
3673 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3674 break;
3675 case 0x9: /* fabss */
3676 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3677 break;
3678 case 0x29: /* fsqrts */
3679 CHECK_FPU_FEATURE(dc, FSQRT);
3680 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3681 break;
3682 case 0x2a: /* fsqrtd */
3683 CHECK_FPU_FEATURE(dc, FSQRT);
3684 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3685 break;
3686 case 0x2b: /* fsqrtq */
3687 CHECK_FPU_FEATURE(dc, FLOAT128);
3688 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3689 break;
3690 case 0x41: /* fadds */
3691 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3692 break;
3693 case 0x42: /* faddd */
3694 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3695 break;
3696 case 0x43: /* faddq */
3697 CHECK_FPU_FEATURE(dc, FLOAT128);
3698 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3699 break;
3700 case 0x45: /* fsubs */
3701 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3702 break;
3703 case 0x46: /* fsubd */
3704 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3705 break;
3706 case 0x47: /* fsubq */
3707 CHECK_FPU_FEATURE(dc, FLOAT128);
3708 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3709 break;
3710 case 0x49: /* fmuls */
3711 CHECK_FPU_FEATURE(dc, FMUL);
3712 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3713 break;
3714 case 0x4a: /* fmuld */
3715 CHECK_FPU_FEATURE(dc, FMUL);
3716 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3717 break;
3718 case 0x4b: /* fmulq */
3719 CHECK_FPU_FEATURE(dc, FLOAT128);
3720 CHECK_FPU_FEATURE(dc, FMUL);
3721 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3722 break;
3723 case 0x4d: /* fdivs */
3724 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3725 break;
3726 case 0x4e: /* fdivd */
3727 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3728 break;
3729 case 0x4f: /* fdivq */
3730 CHECK_FPU_FEATURE(dc, FLOAT128);
3731 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3732 break;
3733 case 0x69: /* fsmuld */
3734 CHECK_FPU_FEATURE(dc, FSMULD);
3735 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3736 break;
3737 case 0x6e: /* fdmulq */
3738 CHECK_FPU_FEATURE(dc, FLOAT128);
3739 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3740 break;
3741 case 0xc4: /* fitos */
3742 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3743 break;
3744 case 0xc6: /* fdtos */
3745 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3746 break;
3747 case 0xc7: /* fqtos */
3748 CHECK_FPU_FEATURE(dc, FLOAT128);
3749 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3750 break;
3751 case 0xc8: /* fitod */
3752 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3753 break;
3754 case 0xc9: /* fstod */
3755 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3756 break;
3757 case 0xcb: /* fqtod */
3758 CHECK_FPU_FEATURE(dc, FLOAT128);
3759 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3760 break;
3761 case 0xcc: /* fitoq */
3762 CHECK_FPU_FEATURE(dc, FLOAT128);
3763 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3764 break;
3765 case 0xcd: /* fstoq */
3766 CHECK_FPU_FEATURE(dc, FLOAT128);
3767 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3768 break;
3769 case 0xce: /* fdtoq */
3770 CHECK_FPU_FEATURE(dc, FLOAT128);
3771 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3772 break;
3773 case 0xd1: /* fstoi */
3774 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3775 break;
3776 case 0xd2: /* fdtoi */
3777 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3778 break;
3779 case 0xd3: /* fqtoi */
3780 CHECK_FPU_FEATURE(dc, FLOAT128);
3781 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3782 break;
3783 #ifdef TARGET_SPARC64
3784 case 0x2: /* V9 fmovd */
3785 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3786 gen_store_fpr_D(dc, rd, cpu_src1_64);
3787 break;
3788 case 0x3: /* V9 fmovq */
3789 CHECK_FPU_FEATURE(dc, FLOAT128);
3790 gen_move_Q(dc, rd, rs2);
3791 break;
3792 case 0x6: /* V9 fnegd */
3793 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3794 break;
3795 case 0x7: /* V9 fnegq */
3796 CHECK_FPU_FEATURE(dc, FLOAT128);
3797 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3798 break;
3799 case 0xa: /* V9 fabsd */
3800 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3801 break;
3802 case 0xb: /* V9 fabsq */
3803 CHECK_FPU_FEATURE(dc, FLOAT128);
3804 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3805 break;
3806 case 0x81: /* V9 fstox */
3807 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3808 break;
3809 case 0x82: /* V9 fdtox */
3810 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3811 break;
3812 case 0x83: /* V9 fqtox */
3813 CHECK_FPU_FEATURE(dc, FLOAT128);
3814 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3815 break;
3816 case 0x84: /* V9 fxtos */
3817 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3818 break;
3819 case 0x88: /* V9 fxtod */
3820 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3821 break;
3822 case 0x8c: /* V9 fxtoq */
3823 CHECK_FPU_FEATURE(dc, FLOAT128);
3824 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3825 break;
3826 #endif
3827 default:
3828 goto illegal_insn;
3830 } else if (xop == 0x35) { /* FPU Operations */
3831 #ifdef TARGET_SPARC64
3832 int cond;
3833 #endif
3834 if (gen_trap_ifnofpu(dc)) {
3835 goto jmp_insn;
3837 gen_op_clear_ieee_excp_and_FTT();
3838 rs1 = GET_FIELD(insn, 13, 17);
3839 rs2 = GET_FIELD(insn, 27, 31);
3840 xop = GET_FIELD(insn, 18, 26);
3842 #ifdef TARGET_SPARC64
3843 #define FMOVR(sz) \
3844 do { \
3845 DisasCompare cmp; \
3846 cond = GET_FIELD_SP(insn, 10, 12); \
3847 cpu_src1 = get_src1(dc, insn); \
3848 gen_compare_reg(&cmp, cond, cpu_src1); \
3849 gen_fmov##sz(dc, &cmp, rd, rs2); \
3850 free_compare(&cmp); \
3851 } while (0)
3853 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3854 FMOVR(s);
3855 break;
3856 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3857 FMOVR(d);
3858 break;
3859 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3860 CHECK_FPU_FEATURE(dc, FLOAT128);
3861 FMOVR(q);
3862 break;
3864 #undef FMOVR
3865 #endif
3866 switch (xop) {
3867 #ifdef TARGET_SPARC64
3868 #define FMOVCC(fcc, sz) \
3869 do { \
3870 DisasCompare cmp; \
3871 cond = GET_FIELD_SP(insn, 14, 17); \
3872 gen_fcompare(&cmp, fcc, cond); \
3873 gen_fmov##sz(dc, &cmp, rd, rs2); \
3874 free_compare(&cmp); \
3875 } while (0)
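            /* In the opf encodings below, bits 6 and 7 select %fcc0..%fcc3
               for the FP-condition forms, while the 0x100 bit selects the
               integer-condition forms, with 0x080 choosing %xcc over %icc
               (a reading of the case values, not of the manual). */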
3877 case 0x001: /* V9 fmovscc %fcc0 */
3878 FMOVCC(0, s);
3879 break;
3880 case 0x002: /* V9 fmovdcc %fcc0 */
3881 FMOVCC(0, d);
3882 break;
3883 case 0x003: /* V9 fmovqcc %fcc0 */
3884 CHECK_FPU_FEATURE(dc, FLOAT128);
3885 FMOVCC(0, q);
3886 break;
3887 case 0x041: /* V9 fmovscc %fcc1 */
3888 FMOVCC(1, s);
3889 break;
3890 case 0x042: /* V9 fmovdcc %fcc1 */
3891 FMOVCC(1, d);
3892 break;
3893 case 0x043: /* V9 fmovqcc %fcc1 */
3894 CHECK_FPU_FEATURE(dc, FLOAT128);
3895 FMOVCC(1, q);
3896 break;
3897 case 0x081: /* V9 fmovscc %fcc2 */
3898 FMOVCC(2, s);
3899 break;
3900 case 0x082: /* V9 fmovdcc %fcc2 */
3901 FMOVCC(2, d);
3902 break;
3903 case 0x083: /* V9 fmovqcc %fcc2 */
3904 CHECK_FPU_FEATURE(dc, FLOAT128);
3905 FMOVCC(2, q);
3906 break;
3907 case 0x0c1: /* V9 fmovscc %fcc3 */
3908 FMOVCC(3, s);
3909 break;
3910 case 0x0c2: /* V9 fmovdcc %fcc3 */
3911 FMOVCC(3, d);
3912 break;
3913 case 0x0c3: /* V9 fmovqcc %fcc3 */
3914 CHECK_FPU_FEATURE(dc, FLOAT128);
3915 FMOVCC(3, q);
3916 break;
3917 #undef FMOVCC
3918 #define FMOVCC(xcc, sz) \
3919 do { \
3920 DisasCompare cmp; \
3921 cond = GET_FIELD_SP(insn, 14, 17); \
3922 gen_compare(&cmp, xcc, cond, dc); \
3923 gen_fmov##sz(dc, &cmp, rd, rs2); \
3924 free_compare(&cmp); \
3925 } while (0)
3927 case 0x101: /* V9 fmovscc %icc */
3928 FMOVCC(0, s);
3929 break;
3930 case 0x102: /* V9 fmovdcc %icc */
3931 FMOVCC(0, d);
3932 break;
3933 case 0x103: /* V9 fmovqcc %icc */
3934 CHECK_FPU_FEATURE(dc, FLOAT128);
3935 FMOVCC(0, q);
3936 break;
3937 case 0x181: /* V9 fmovscc %xcc */
3938 FMOVCC(1, s);
3939 break;
3940 case 0x182: /* V9 fmovdcc %xcc */
3941 FMOVCC(1, d);
3942 break;
3943 case 0x183: /* V9 fmovqcc %xcc */
3944 CHECK_FPU_FEATURE(dc, FLOAT128);
3945 FMOVCC(1, q);
3946 break;
3947 #undef FMOVCC
3948 #endif
3949 case 0x51: /* fcmps, V9 %fcc */
3950 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3951 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3952 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3953 break;
3954 case 0x52: /* fcmpd, V9 %fcc */
3955 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3956 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3957 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3958 break;
3959 case 0x53: /* fcmpq, V9 %fcc */
3960 CHECK_FPU_FEATURE(dc, FLOAT128);
3961 gen_op_load_fpr_QT0(QFPREG(rs1));
3962 gen_op_load_fpr_QT1(QFPREG(rs2));
3963 gen_op_fcmpq(rd & 3);
3964 break;
3965 case 0x55: /* fcmpes, V9 %fcc */
3966 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3967 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3968 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3969 break;
3970 case 0x56: /* fcmped, V9 %fcc */
3971 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3972 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3973 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3974 break;
3975 case 0x57: /* fcmpeq, V9 %fcc */
3976 CHECK_FPU_FEATURE(dc, FLOAT128);
3977 gen_op_load_fpr_QT0(QFPREG(rs1));
3978 gen_op_load_fpr_QT1(QFPREG(rs2));
3979 gen_op_fcmpeq(rd & 3);
3980 break;
3981 default:
3982 goto illegal_insn;
3984 } else if (xop == 0x2) {
3985 TCGv dst = gen_dest_gpr(dc, rd);
3986 rs1 = GET_FIELD(insn, 13, 17);
3987 if (rs1 == 0) {
3988 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3989 if (IS_IMM) { /* immediate */
3990 simm = GET_FIELDs(insn, 19, 31);
3991 tcg_gen_movi_tl(dst, simm);
3992 gen_store_gpr(dc, rd, dst);
3993 } else { /* register */
3994 rs2 = GET_FIELD(insn, 27, 31);
3995 if (rs2 == 0) {
3996 tcg_gen_movi_tl(dst, 0);
3997 gen_store_gpr(dc, rd, dst);
3998 } else {
3999 cpu_src2 = gen_load_gpr(dc, rs2);
4000 gen_store_gpr(dc, rd, cpu_src2);
4003 } else {
4004 cpu_src1 = get_src1(dc, insn);
4005 if (IS_IMM) { /* immediate */
4006 simm = GET_FIELDs(insn, 19, 31);
4007 tcg_gen_ori_tl(dst, cpu_src1, simm);
4008 gen_store_gpr(dc, rd, dst);
4009 } else { /* register */
4010 rs2 = GET_FIELD(insn, 27, 31);
4011 if (rs2 == 0) {
4012 /* mov shortcut: or x, %g0, y -> mov x, y */
4013 gen_store_gpr(dc, rd, cpu_src1);
4014 } else {
4015 cpu_src2 = gen_load_gpr(dc, rs2);
4016 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4017 gen_store_gpr(dc, rd, dst);
4021 #ifdef TARGET_SPARC64
4022 } else if (xop == 0x25) { /* sll, V9 sllx */
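                /* Bit 12 of the insn is the V9 "x" bit: when set the shift
                   is a full 64-bit sllx with a 6-bit count, otherwise only
                   the low 5 bits of the count are used (sll semantics). */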
4023 cpu_src1 = get_src1(dc, insn);
4024 if (IS_IMM) { /* immediate */
4025 simm = GET_FIELDs(insn, 20, 31);
4026 if (insn & (1 << 12)) {
4027 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4028 } else {
4029 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4031 } else { /* register */
4032 rs2 = GET_FIELD(insn, 27, 31);
4033 cpu_src2 = gen_load_gpr(dc, rs2);
4034 cpu_tmp0 = get_temp_tl(dc);
4035 if (insn & (1 << 12)) {
4036 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4037 } else {
4038 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4040 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4042 gen_store_gpr(dc, rd, cpu_dst);
4043 } else if (xop == 0x26) { /* srl, V9 srlx */
4044 cpu_src1 = get_src1(dc, insn);
4045 if (IS_IMM) { /* immediate */
4046 simm = GET_FIELDs(insn, 20, 31);
4047 if (insn & (1 << 12)) {
4048 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4049 } else {
4050 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4051 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4053 } else { /* register */
4054 rs2 = GET_FIELD(insn, 27, 31);
4055 cpu_src2 = gen_load_gpr(dc, rs2);
4056 cpu_tmp0 = get_temp_tl(dc);
4057 if (insn & (1 << 12)) {
4058 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4059 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4060 } else {
4061 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4062 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4063 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4066 gen_store_gpr(dc, rd, cpu_dst);
4067 } else if (xop == 0x27) { /* sra, V9 srax */
4068 cpu_src1 = get_src1(dc, insn);
4069 if (IS_IMM) { /* immediate */
4070 simm = GET_FIELDs(insn, 20, 31);
4071 if (insn & (1 << 12)) {
4072 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4073 } else {
4074 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4075 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4077 } else { /* register */
4078 rs2 = GET_FIELD(insn, 27, 31);
4079 cpu_src2 = gen_load_gpr(dc, rs2);
4080 cpu_tmp0 = get_temp_tl(dc);
4081 if (insn & (1 << 12)) {
4082 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4083 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4084 } else {
4085 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4086 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4087 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4090 gen_store_gpr(dc, rd, cpu_dst);
4091 #endif
4092 } else if (xop < 0x36) {
4093 if (xop < 0x20) {
4094 cpu_src1 = get_src1(dc, insn);
4095 cpu_src2 = get_src2(dc, insn);
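                    /* Bit 4 of xop selects the "cc" form of each ALU op
                       (addcc, andcc, ...), which additionally updates the
                       integer condition codes; hence the switch on
                       xop & ~0x10 with (xop & 0x10) tested per case. */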
4096 switch (xop & ~0x10) {
4097 case 0x0: /* add */
4098 if (xop & 0x10) {
4099 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4100 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4101 dc->cc_op = CC_OP_ADD;
4102 } else {
4103 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4105 break;
4106 case 0x1: /* and */
4107 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4108 if (xop & 0x10) {
4109 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4110 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4111 dc->cc_op = CC_OP_LOGIC;
4113 break;
4114 case 0x2: /* or */
4115 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4116 if (xop & 0x10) {
4117 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4118 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4119 dc->cc_op = CC_OP_LOGIC;
4121 break;
4122 case 0x3: /* xor */
4123 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4124 if (xop & 0x10) {
4125 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4126 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4127 dc->cc_op = CC_OP_LOGIC;
4129 break;
4130 case 0x4: /* sub */
4131 if (xop & 0x10) {
4132 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4133 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4134 dc->cc_op = CC_OP_SUB;
4135 } else {
4136 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4138 break;
4139 case 0x5: /* andn */
4140 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4141 if (xop & 0x10) {
4142 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4143 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4144 dc->cc_op = CC_OP_LOGIC;
4146 break;
4147 case 0x6: /* orn */
4148 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4149 if (xop & 0x10) {
4150 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4151 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4152 dc->cc_op = CC_OP_LOGIC;
4154 break;
4155 case 0x7: /* xorn */
4156 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4157 if (xop & 0x10) {
4158 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4159 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4160 dc->cc_op = CC_OP_LOGIC;
4162 break;
4163 case 0x8: /* addx, V9 addc */
4164 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4165 (xop & 0x10));
4166 break;
4167 #ifdef TARGET_SPARC64
4168 case 0x9: /* V9 mulx */
4169 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4170 break;
4171 #endif
4172 case 0xa: /* umul */
4173 CHECK_IU_FEATURE(dc, MUL);
4174 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4175 if (xop & 0x10) {
4176 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4177 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4178 dc->cc_op = CC_OP_LOGIC;
4180 break;
4181 case 0xb: /* smul */
4182 CHECK_IU_FEATURE(dc, MUL);
4183 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4184 if (xop & 0x10) {
4185 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4186 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4187 dc->cc_op = CC_OP_LOGIC;
4189 break;
4190 case 0xc: /* subx, V9 subc */
4191 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4192 (xop & 0x10));
4193 break;
4194 #ifdef TARGET_SPARC64
4195 case 0xd: /* V9 udivx */
4196 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4197 break;
4198 #endif
4199 case 0xe: /* udiv */
4200 CHECK_IU_FEATURE(dc, DIV);
4201 if (xop & 0x10) {
4202 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4203 cpu_src2);
4204 dc->cc_op = CC_OP_DIV;
4205 } else {
4206 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4207 cpu_src2);
4209 break;
4210 case 0xf: /* sdiv */
4211 CHECK_IU_FEATURE(dc, DIV);
4212 if (xop & 0x10) {
4213 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4214 cpu_src2);
4215 dc->cc_op = CC_OP_DIV;
4216 } else {
4217 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4218 cpu_src2);
4220 break;
4221 default:
4222 goto illegal_insn;
4224 gen_store_gpr(dc, rd, cpu_dst);
4225 } else {
4226 cpu_src1 = get_src1(dc, insn);
4227 cpu_src2 = get_src2(dc, insn);
4228 switch (xop) {
4229 case 0x20: /* taddcc */
4230 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4231 gen_store_gpr(dc, rd, cpu_dst);
4232 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4233 dc->cc_op = CC_OP_TADD;
4234 break;
4235 case 0x21: /* tsubcc */
4236 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4237 gen_store_gpr(dc, rd, cpu_dst);
4238 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4239 dc->cc_op = CC_OP_TSUB;
4240 break;
4241 case 0x22: /* taddcctv */
4242 gen_helper_taddcctv(cpu_dst, cpu_env,
4243 cpu_src1, cpu_src2);
4244 gen_store_gpr(dc, rd, cpu_dst);
4245 dc->cc_op = CC_OP_TADDTV;
4246 break;
4247 case 0x23: /* tsubcctv */
4248 gen_helper_tsubcctv(cpu_dst, cpu_env,
4249 cpu_src1, cpu_src2);
4250 gen_store_gpr(dc, rd, cpu_dst);
4251 dc->cc_op = CC_OP_TSUBTV;
4252 break;
4253 case 0x24: /* mulscc */
4254 update_psr(dc);
4255 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4256 gen_store_gpr(dc, rd, cpu_dst);
4257 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4258 dc->cc_op = CC_OP_ADD;
4259 break;
4260 #ifndef TARGET_SPARC64
4261 case 0x25: /* sll */
4262 if (IS_IMM) { /* immediate */
4263 simm = GET_FIELDs(insn, 20, 31);
4264 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4265 } else { /* register */
4266 cpu_tmp0 = get_temp_tl(dc);
4267 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4268 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4270 gen_store_gpr(dc, rd, cpu_dst);
4271 break;
4272 case 0x26: /* srl */
4273 if (IS_IMM) { /* immediate */
4274 simm = GET_FIELDs(insn, 20, 31);
4275 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4276 } else { /* register */
4277 cpu_tmp0 = get_temp_tl(dc);
4278 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4279 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4281 gen_store_gpr(dc, rd, cpu_dst);
4282 break;
4283 case 0x27: /* sra */
4284 if (IS_IMM) { /* immediate */
4285 simm = GET_FIELDs(insn, 20, 31);
4286 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4287 } else { /* register */
4288 cpu_tmp0 = get_temp_tl(dc);
4289 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4290 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4292 gen_store_gpr(dc, rd, cpu_dst);
4293 break;
4294 #endif
4295 case 0x30:
4297 cpu_tmp0 = get_temp_tl(dc);
4298 switch(rd) {
4299 case 0: /* wry */
4300 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4301 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4302 break;
4303 #ifndef TARGET_SPARC64
4304 case 0x01 ... 0x0f: /* undefined in the
4305 SPARCv8 manual, nop
4306 on the microSPARC
4307 II */
4308 case 0x10 ... 0x1f: /* implementation-dependent
4309 in the SPARCv8
4310 manual, nop on the
4311 microSPARC II */
4312 if ((rd == 0x13) && (dc->def->features &
4313 CPU_FEATURE_POWERDOWN)) {
4314 /* LEON3 power-down */
4315 save_state(dc);
4316 gen_helper_power_down(cpu_env);
4318 break;
4319 #else
4320 case 0x2: /* V9 wrccr */
4321 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4322 gen_helper_wrccr(cpu_env, cpu_tmp0);
4323 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4324 dc->cc_op = CC_OP_FLAGS;
4325 break;
4326 case 0x3: /* V9 wrasi */
4327 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4328 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4329 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4330 offsetof(CPUSPARCState, asi));
4331 /* End TB to notice changed ASI. */
4332 save_state(dc);
4333 gen_op_next_insn();
4334 tcg_gen_exit_tb(NULL, 0);
4335 dc->base.is_jmp = DISAS_NORETURN;
4336 break;
4337 case 0x6: /* V9 wrfprs */
4338 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4339 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4340 dc->fprs_dirty = 0;
4341 save_state(dc);
4342 gen_op_next_insn();
4343 tcg_gen_exit_tb(NULL, 0);
4344 dc->base.is_jmp = DISAS_NORETURN;
4345 break;
4346 case 0xf: /* V9 sir, nop if user */
4347 #if !defined(CONFIG_USER_ONLY)
4348 if (supervisor(dc)) {
4349 ; // XXX
4351 #endif
4352 break;
4353 case 0x13: /* Graphics Status */
4354 if (gen_trap_ifnofpu(dc)) {
4355 goto jmp_insn;
4357 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4358 break;
4359 case 0x14: /* Softint set */
4360 if (!supervisor(dc))
4361 goto illegal_insn;
4362 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4363 gen_helper_set_softint(cpu_env, cpu_tmp0);
4364 break;
4365 case 0x15: /* Softint clear */
4366 if (!supervisor(dc))
4367 goto illegal_insn;
4368 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4369 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4370 break;
4371 case 0x16: /* Softint write */
4372 if (!supervisor(dc))
4373 goto illegal_insn;
4374 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4375 gen_helper_write_softint(cpu_env, cpu_tmp0);
4376 break;
                            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    if (tb_cflags(dc->base.tb) &
                                        CF_USE_ICOUNT) {
                                        gen_io_start();
                                    }
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_tick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                    /* End TB to handle timer interrupt */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    if (tb_cflags(dc->base.tb) &
                                        CF_USE_ICOUNT) {
                                        gen_io_start();
                                    }
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                    /* End TB to handle timer interrupt */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
                                                   cpu_src2);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, stick));
                                    if (tb_cflags(dc->base.tb) &
                                        CF_USE_ICOUNT) {
                                        gen_io_start();
                                    }
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_stick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                    /* End TB to handle timer interrupt */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 0x10: /* Performance Control */
                            case 0x11: /* Performance Instrumentation
                                          Counter */
                            case 0x12: /* Dispatch Control */
#endif
                            default:
                                goto illegal_insn;
                            }
                        }
                        break;
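                    /*
                     * Pre-V9, 0x31 is wrpsr: the helper can raise an
                     * exception for an out-of-range CWP, and the flags are
                     * left live in PSR (CC_OP_FLAGS), so the TB must end.
                     * On V9 the same opcode encodes saved/restored.
                     */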
#if !defined(CONFIG_USER_ONLY)
                    case 0x31: /* wrpsr, V9 saved, restored */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0:
                                gen_helper_saved(cpu_env);
                                break;
                            case 1:
                                gen_helper_restored(cpu_env);
                                break;
                            case 2: /* UA2005 allclean */
                            case 3: /* UA2005 otherw */
                            case 4: /* UA2005 normalw */
                            case 5: /* UA2005 invalw */
                                // XXX
                            default:
                                goto illegal_insn;
                            }
#else
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            gen_helper_wrpsr(cpu_env, cpu_tmp0);
                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
                            dc->cc_op = CC_OP_FLAGS;
                            save_state(dc);
                            gen_op_next_insn();
                            tcg_gen_exit_tb(NULL, 0);
                            dc->base.is_jmp = DISAS_NORETURN;
#endif
                        }
                        break;
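                    /*
                     * V9 wrpr: rd selects the privileged register.  The
                     * trap-state registers (tpc/tnpc/tstate/tt) live in the
                     * trap_state element for the current trap level, whose
                     * address gen_load_trap_state_at_tl computes at run time.
                     */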
                    case 0x32: /* wrwim, V9 wrpr */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0: // tpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 1: // tnpc
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state, tnpc));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 2: // tstate
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
                                                  offsetof(trap_state,
                                                           tstate));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 3: // tt
                                {
                                    TCGv_ptr r_tsptr;

                                    r_tsptr = tcg_temp_new_ptr();
                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
                                                    offsetof(trap_state, tt));
                                    tcg_temp_free_ptr(r_tsptr);
                                }
                                break;
                            case 4: // tick
                                {
                                    TCGv_ptr r_tickptr;

                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, tick));
                                    if (tb_cflags(dc->base.tb) &
                                        CF_USE_ICOUNT) {
                                        gen_io_start();
                                    }
                                    gen_helper_tick_set_count(r_tickptr,
                                                              cpu_tmp0);
                                    tcg_temp_free_ptr(r_tickptr);
                                    /* End TB to handle timer interrupt */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 5: // tba
                                tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
                                break;
                            case 6: // pstate
                                save_state(dc);
                                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                                    gen_io_start();
                                }
                                gen_helper_wrpstate(cpu_env, cpu_tmp0);
                                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                                    /* I/O ops in icount mode must end the TB */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 7: // tl
                                save_state(dc);
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState, tl));
                                dc->npc = DYNAMIC_PC;
                                break;
                            case 8: // pil
                                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                                    gen_io_start();
                                }
                                gen_helper_wrpil(cpu_env, cpu_tmp0);
                                if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                                    /* I/O ops in icount mode must end the TB */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 9: // cwp
                                gen_helper_wrcwp(cpu_env, cpu_tmp0);
                                break;
                            case 10: // cansave
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cansave));
                                break;
                            case 11: // canrestore
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         canrestore));
                                break;
                            case 12: // cleanwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         cleanwin));
                                break;
                            case 13: // otherwin
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         otherwin));
                                break;
                            case 14: // wstate
                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
                                                offsetof(CPUSPARCState,
                                                         wstate));
                                break;
                            case 16: // UA2005 gl
                                CHECK_IU_FEATURE(dc, GL);
                                gen_helper_wrgl(cpu_env, cpu_tmp0);
                                break;
                            case 26: // UA2005 strand status
                                CHECK_IU_FEATURE(dc, HYPV);
                                if (!hypervisor(dc))
                                    goto priv_insn;
                                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
                                break;
                            default:
                                goto illegal_insn;
                            }
#else
                            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
                            if (dc->def->nwindows != 32) {
                                tcg_gen_andi_tl(cpu_wim, cpu_wim,
                                                (1 << dc->def->nwindows) - 1);
                            }
#endif
                        }
                        break;
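                    /*
                     * Pre-V9 this is wrtbr; on UA2005 the same opcode
                     * writes the hyperprivileged registers and therefore
                     * additionally requires hypervisor privilege.
                     */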
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                            CHECK_IU_FEATURE(dc, HYPV);
                            if (!hypervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = get_temp_tl(dc);
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            switch (rd) {
                            case 0: // hpstate
                                tcg_gen_st_i64(cpu_tmp0, cpu_env,
                                               offsetof(CPUSPARCState,
                                                        hpstate));
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(NULL, 0);
                                dc->base.is_jmp = DISAS_NORETURN;
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                                break;
                            case 5: // htba
                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                                break;
                            case 31: // hstick_cmpr
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                                   offsetof(CPUSPARCState, hstick));
                                    if (tb_cflags(dc->base.tb) &
                                        CF_USE_ICOUNT) {
                                        gen_io_start();
                                    }
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_hstick_cmpr);
                                    tcg_temp_free_ptr(r_tickptr);
                                    /* End TB to handle timer interrupt */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#endif
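                    /*
                     * movcc/movr compile to a single movcond: DisasCompare
                     * describes the condition, and the old rd value is
                     * retained when the condition is false.
                     */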
#ifdef TARGET_SPARC64
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            DisasCompare cmp;
                            TCGv dst;

                            if (insn & (1 << 18)) {
                                if (cc == 0) {
                                    gen_compare(&cmp, 0, cond, dc);
                                } else if (cc == 2) {
                                    gen_compare(&cmp, 1, cond, dc);
                                } else {
                                    goto illegal_insn;
                                }
                            } else {
                                gen_fcompare(&cmp, cc, cond);
                            }

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 11-bit field we have
                               in movcc.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 10);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
                    case 0x2d: /* V9 sdivx */
                        gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2e: /* V9 popc */
                        tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            DisasCompare cmp;
                            TCGv dst;

                            gen_compare_reg(&cmp, cond, cpu_src1);

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 10-bit field we have
                               in movr.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 9);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            free_compare(&cmp);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
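                /*
                 * For the VIS cases below, the gen_ne_fop_* helpers expand
                 * to load-source / operate / store-destination sequences on
                 * the fp registers, while the gen_gsr_fop_* variants also
                 * pass %gsr to the helper (fpack, faligndata, bshuffle).
                 */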
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
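            /*
             * V9 return: the target address is computed first, then the
             * register window is restored; the alignment-checked target
             * becomes the new npc, so this leaves the TB like a jump.
             */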
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else { /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_helper_restore(cpu_env);
                gen_mov_pc_npc(dc);
                gen_check_align(cpu_tmp0, 3);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
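            /*
             * The remaining xop values (jmpl, rett, flush, save, restore,
             * done/retry) all share the rs1 + simm13 / rs1 + rs2 address
             * computation, done once into cpu_tmp0 below.
             */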
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) { /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else { /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
                case 0x38: /* jmpl */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);

                        gen_mov_pc_npc(dc);
                        gen_check_align(cpu_tmp0, 3);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39: /* rett, V9 return */
                    {
                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_mov_pc_npc(dc);
                        gen_check_align(cpu_tmp0, 3);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                        goto unimp_flush;
                    /* nop */
                    break;
                case 0x3c: /* save */
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d: /* restore */
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
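                /*
                 * done/retry reload pc/npc from the current trap state, so
                 * both become dynamic here; gen_io_start() brackets the
                 * helpers when icount is enabled.
                 */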
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e: /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                                gen_io_start();
                            }
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                                gen_io_start();
                            }
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
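    /*
     * Format 3 memory instructions.  xop selects the flavour: integer
     * loads and the alternate-space forms first, then fp loads, integer
     * stores, fp stores, and finally the stfa/casa/casxa group.
     */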
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) { /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else { /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0: /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1: /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2: /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3: /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9: /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa: /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd: /* ldstub */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10: /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11: /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12: /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13: /* ldda, load double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19: /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a: /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d: /* ldstuba -- XXX: should be atomically */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f: /* swapa, swap reg with alt. memory. Also
                              atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;

#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20: /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ);
                        gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                    break;
                case 0x22: /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    tcg_temp_free_i64(cpu_src1_64);
                    tcg_temp_free_i64(cpu_src2_64);
                    break;
                case 0x23: /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write.  */
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    goto egress;
 unimp_flush:
    gen_exception(dc, TT_UNIMP_FLUSH);
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    goto egress;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
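
/*
 * Translator hooks: the sparc_tr_* functions below implement the generic
 * translator_loop callbacks.  DisasContextBase is embedded in DisasContext,
 * so each hook recovers the target state with container_of.
 */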
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->npc & JUMP_PC) {
        assert(dc->jump_pc[1] == dc->pc + 4);
        tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
    } else {
        tcg_gen_insn_start(dc->pc, dc->npc);
    }
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;
    disas_sparc_insn(dc, insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
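
/*
 * At TB end, either pc and npc are both known statically and the TB can be
 * chained directly with goto_tb, or whatever is known is written back and
 * the TB exits to the main loop.
 */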
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}

static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
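
/*
 * Register the TCG globals.  %g0 gets no backing global (cpu_regs[0] is
 * NULL; reads and writes of it are special-cased), %g1-%g7 live in env,
 * and the window registers are reached indirectly through regwptr so that
 * a register-window switch only has to retarget the pointer.
 */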
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
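
/*
 * Restore pc/npc from the values recorded by sparc_tr_insn_start.  An npc
 * tagged with JUMP_PC means the insn's npc depended on a condition that is
 * only known at run time: env->cond selects between the recorded branch
 * target and pc + 4.
 */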
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}