gt64120: convert to realize()
[qemu/ar7.git] / target-sparc / translate.c
blobf99ceed83423d98e27a0502556206ebca7287b6d
1 /*
2 SPARC translation
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "exec/helper-proto.h"
30 #include "tcg-op.h"
31 #include "exec/cpu_ldst.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
38 #define DEBUG_DISAS
40 #define DYNAMIC_PC 1 /* dynamic pc value */
41 #define JUMP_PC 2 /* dynamic pc value which takes only two values
42 according to jump_pc[T2] */
44 /* global register indexes */
45 static TCGv_ptr cpu_env, cpu_regwptr;
46 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
47 static TCGv_i32 cpu_cc_op;
48 static TCGv_i32 cpu_psr;
49 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
50 static TCGv cpu_y;
51 #ifndef CONFIG_USER_ONLY
52 static TCGv cpu_tbr;
53 #endif
54 static TCGv cpu_cond;
55 #ifdef TARGET_SPARC64
56 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
57 static TCGv cpu_gsr;
58 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
59 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
60 static TCGv_i32 cpu_softint;
61 #else
62 static TCGv cpu_wim;
63 #endif
64 /* Floating point registers */
65 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67 #include "exec/gen-icount.h"
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;               /* set when the TB ends in a branch */
    int mem_idx;             /* MMU index used for memory accesses */
    int fpu_enabled;         /* FPU access is legal in this context */
    int address_mask_32bit;  /* sparc64 AM bit: truncate addresses to 32 bits */
    int singlestep;          /* gdbstub single-stepping in effect */
    uint32_t cc_op;          /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];         /* per-insn i32 temporaries, freed after the insn */
    TCGv ttl[5];             /* per-insn target-long temporaries */
    int n_t32;               /* number of live entries in t32[] */
    int n_ttl;               /* number of live entries in ttl[] */
} DisasContext;
/* A comparison decomposed into a TCG condition and two operands,
   as produced by gen_compare()/gen_fcompare()/gen_compare_reg(). */
typedef struct {
    TCGCond cond;
    bool is_bool;   /* c1 already holds a 0/1 boolean; cond is NE against 0 */
    bool g1, g2;    /* c1/c2 are TCG globals and must not be freed */
    TCGv c1, c2;
} DisasCompare;
94 // This function uses non-native bit order
95 #define GET_FIELD(X, FROM, TO) \
96 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
98 // This function uses the order in the manuals, i.e. bit 0 is 2^0
99 #define GET_FIELD_SP(X, FROM, TO) \
100 GET_FIELD(X, 31 - (TO), 31 - (FROM))
102 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
103 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
105 #ifdef TARGET_SPARC64
106 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
107 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
108 #else
109 #define DFPREG(r) (r & 0x1e)
110 #define QFPREG(r) (r & 0x1c)
111 #endif
113 #define UA2005_HTRAP_MASK 0xff
114 #define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low LEN bits of X to a full int.  LEN must be in [1, 32].
 *
 * The previous implementation used `(x << (32 - len)) >> (32 - len)`,
 * which left-shifts signed values (undefined behaviour when bits are
 * lost) and right-shifts negative values (implementation-defined).
 * Use unsigned arithmetic and the xor/subtract trick instead, which is
 * fully defined and yields the same results.
 */
static int sign_extend(int x, int len)
{
    uint64_t mask = (len >= 32) ? UINT32_MAX : ((1ULL << len) - 1);
    uint64_t val = (uint32_t)x & mask;   /* keep only the low LEN bits */
    uint64_t sign = 1ULL << (len - 1);   /* sign bit of the LEN-bit field */

    return (int)(int64_t)((val ^ sign) - sign);
}
122 #define IS_IMM (insn & (1<<13))
124 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
126 TCGv_i32 t;
127 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
128 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
129 return t;
132 static inline TCGv get_temp_tl(DisasContext *dc)
134 TCGv t;
135 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
136 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
137 return t;
/* Mark the FPU bank containing register RD as dirty in %fprs:
   bit 0 covers f0-f31 (lower), bit 1 covers f32-f63 (upper). */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    int dirty_bit = (rd < 32) ? 1 : 2;

    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, dirty_bit);
#endif
}
/* floating point registers moves */

/* Return an i32 holding single-precision register SRC.  The f registers
   are stored as i64 pairs in cpu_fpr[]; even registers live in the high
   half of the pair, odd registers in the low half. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    /* On a 32-bit host each i64 global is a register pair, so the
       halves can be aliased directly. */
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        /* Odd register: reinterpret the i64 global as an i32 (the value
           occupies the low half). */
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        /* Even register: shift the high half down into a fresh temp. */
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

/* Store i32 value V into single-precision register DST and mark the
   containing bank dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    /* Deposit V into the proper half of the i64 pair:
       odd -> bits [31:0], even -> bits [63:32]. */
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
188 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
190 return get_temp_i32(dc);
193 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
195 src = DFPREG(src);
196 return cpu_fpr[src / 2];
199 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
201 dst = DFPREG(dst);
202 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
203 gen_update_fprs_dirty(dst);
206 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
208 return cpu_fpr[DFPREG(dst) / 2];
211 static void gen_op_load_fpr_QT0(unsigned int src)
213 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
214 offsetof(CPU_QuadU, ll.upper));
215 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
216 offsetof(CPU_QuadU, ll.lower));
219 static void gen_op_load_fpr_QT1(unsigned int src)
221 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
222 offsetof(CPU_QuadU, ll.upper));
223 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
224 offsetof(CPU_QuadU, ll.lower));
227 static void gen_op_store_QT0_fpr(unsigned int dst)
229 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
230 offsetof(CPU_QuadU, ll.upper));
231 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
232 offsetof(CPU_QuadU, ll.lower));
235 #ifdef TARGET_SPARC64
236 static void gen_move_Q(unsigned int rd, unsigned int rs)
238 rd = QFPREG(rd);
239 rs = QFPREG(rs);
241 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
242 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
243 gen_update_fprs_dirty(rd);
245 #endif
247 /* moves */
248 #ifdef CONFIG_USER_ONLY
249 #define supervisor(dc) 0
250 #ifdef TARGET_SPARC64
251 #define hypervisor(dc) 0
252 #endif
253 #else
254 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
255 #ifdef TARGET_SPARC64
256 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
257 #else
258 #endif
259 #endif
261 #ifdef TARGET_SPARC64
262 #ifndef TARGET_ABI32
263 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
264 #else
265 #define AM_CHECK(dc) (1)
266 #endif
267 #endif
269 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
271 #ifdef TARGET_SPARC64
272 if (AM_CHECK(dc))
273 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
274 #endif
277 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
279 if (reg == 0 || reg >= 8) {
280 TCGv t = get_temp_tl(dc);
281 if (reg == 0) {
282 tcg_gen_movi_tl(t, 0);
283 } else {
284 tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
286 return t;
287 } else {
288 return cpu_gregs[reg];
292 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
294 if (reg > 0) {
295 if (reg < 8) {
296 tcg_gen_mov_tl(cpu_gregs[reg], v);
297 } else {
298 tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
303 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
305 if (reg == 0 || reg >= 8) {
306 return get_temp_tl(dc);
307 } else {
308 return cpu_gregs[reg];
/* End the TB with a jump to (PC, NPC).  When both targets lie on the same
   guest page as this TB and we are not single-stepping, emit a directly
   chainable exit; otherwise fall back to a plain exit. */
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        /* tb pointer + slot index encodes the chaining target */
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
// XXX suboptimal

/* reg = (psr >> PSR_NEG_SHIFT) & 1  -- extract the N flag as 0/1. */
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/* reg = (psr >> PSR_ZERO_SHIFT) & 1  -- extract the Z flag as 0/1. */
static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/* reg = (psr >> PSR_OVF_SHIFT) & 1  -- extract the V flag as 0/1. */
static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/* reg = (psr >> PSR_CARRY_SHIFT) & 1  -- extract the C flag as 0/1. */
static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
/* dst = src1 + src2, latching operands and result into cpu_cc_* for a
   later lazy CC_OP_ADD flags computation. */
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
/* Return a fresh i32 temp holding the 32-bit carry out of the most
   recently latched add, computed as (cc_dst < cc_src) unsigned.
   Caller frees the returned temp. */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    /* Compare only the low 32 bits of the 64-bit cc values. */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
/* Return a fresh i32 temp holding the 32-bit borrow out of the most
   recently latched subtract, computed as (cc_src < cc_src2) unsigned.
   Caller frees the returned temp. */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    /* Compare only the low 32 bits of the 64-bit cc values. */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
/* ADDX/ADDXcc: dst = src1 + src2 + C, where C is the icc carry implied
   by the pending flags operation (dc->cc_op).  If UPDATE_CC, also latch
   operands/result and switch the lazy flags state to CC_OP_ADDX. */
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
/* dst = src1 - src2, latching operands and result into cpu_cc_* for a
   later lazy CC_OP_SUB flags computation. */
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
/* SUBX/SUBXcc: dst = src1 - src2 - C, where C is the icc carry implied
   by the pending flags operation (dc->cc_op).  If UPDATE_CC, also latch
   operands/result and switch the lazy flags state to CC_OP_SUBX. */
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
/* V8 multiply-step instruction MULScc: perform one Booth step of a
   32x32 multiply -- conditionally add src2 based on %y bit 0, shift %y
   and the partial product right by one, and latch add flags.
   The exact op order below mirrors the architectural step. */
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    /* Zero the addend when %y bit 0 is clear. */
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    /* Final add; flags are computed lazily from cpu_cc_* by the caller's
       CC_OP update. */
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
/* 32x32 -> 64 multiply of the low halves of src1/src2 (signed when
   SIGN_EXT is set).  The product goes to DST and the high 32 bits are
   copied into %y. */
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    /* The host mul2 op produces both halves at once; %y gets the high part. */
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    /* Extend the low 32 bits of each operand to 64 bits. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
647 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
649 /* zero-extend truncated operands before multiplication */
650 gen_op_multiply(dst, src1, src2, 0);
653 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
655 /* sign-extend truncated operands before multiplication */
656 gen_op_multiply(dst, src1, src2, 1);
/* Evaluate the 16 Bicc integer conditions from the PSR value in SRC,
   producing a 0/1 boolean in DST.  The comment above each helper gives
   the flag formula it implements. */

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))  -- negation of ble
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)  -- negation of bl
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)  -- negation of bleu
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
   FPSR bit field FCC1 | FCC0:
    0 =  (equal)
    1 <  (less)
    2 >  (greater)
    3    unordered
*/
/* Extract FCC0 of the fcc set selected by FCC_OFFSET from the FSR value
   in SRC, as a 0/1 value in REG. */
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/* Extract FCC1 of the fcc set selected by FCC_OFFSET, as 0/1 in REG. */
static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
/* Evaluate the FBfcc float conditions from the FSR value in SRC,
   producing a 0/1 boolean in DST.  Each comment gives the matching
   {FCC1,FCC0} encodings (0 '=', 1 '<', 2 '>', 3 unordered). */

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
/* Emit a two-way TB exit: branch to PC1 when R_COND is non-zero,
   otherwise to PC2 (npc = target + 4 in both arms). */
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
/* Conditional branch with the annul bit set: when cpu_cond is true,
   execute the delay slot (at npc) then go to PC1; when false, skip
   (annul) the delay slot.  Ends the TB. */
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
/* Conditional branch without the annul bit: the delay slot always
   executes.  If npc is statically known, defer the decision by entering
   the JUMP_PC state; otherwise select the new npc with a movcond now. */
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;       /* taken target */
        dc->jump_pc[1] = npc + 4;   /* fall-through target */
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        /* npc = cpu_cond ? pc1 : npc + 4 */
        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
/* Resolve a pending JUMP_PC state:
   cpu_npc = cpu_cond ? jump_pc[0] : jump_pc[1]. */
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

/* Ensure cpu_npc holds the correct next-PC: resolve a pending JUMP_PC,
   or materialize a statically known npc. */
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
/* Force the lazily evaluated condition codes into env->psr so that the
   PSR can be read directly. */
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

/* Write the translator's pc/npc back into the CPU state, e.g. before a
   helper call that may raise an exception. */
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
/* pc = npc, resolving a pending conditional npc (JUMP_PC) first. */
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        /* Both values are static: just track them in the context. */
        dc->pc = dc->npc;
    }
}

/* Advance the emitted state one instruction: pc = npc, npc += 4. */
static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
/* Free any non-global temporaries held by CMP (globals, flagged by
   g1/g2, must not be freed). */
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
/* Decompose integer condition COND (Bicc cond field, 0-15) on the icc
   (or xcc, when XCC is set) flags into CMP, exploiting the lazy flags
   state in dc->cc_op to avoid materializing the PSR when possible:
   - CC_OP_LOGIC: conditions reduce to a compare of cc_dst against 0;
   - CC_OP_SUB:   most conditions become a direct cc_src <op> cc_src2;
   - otherwise:   fall back to computing the PSR and a boolean result. */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* TCG condition per Bicc cond value after a subtract; -1 marks
       conditions that cannot be expressed as a plain compare. */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* TCG condition per Bicc cond value after a logic op, where C and V
       are known to be zero (compare cc_dst against 0). */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* icc view: compare the sign-extended low 32 bits. */
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            /* N / !N of the result: compare cc_dst against 0. */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        /* Materialize the flags, then handle as CC_OP_FLAGS below. */
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
/* Decompose floating-point condition COND (FBfcc cond field) on fcc
   unit CC (0-3) into CMP.  Always produces a boolean in a fresh temp,
   compared NE against zero. */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    /* Bit offset of the selected fcc field relative to fcc0 in the FSR
       (fcc1-fcc3 live at FSR bits 32/34/36; FSR_FCC0_SHIFT is 10). */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
/* Evaluate integer condition COND on condition-code set CC and leave
   a 0/1 boolean in r_dst.  */
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
/* Evaluate FP condition COND on fcc[CC] and leave a 0/1 boolean in r_dst.  */
static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
1357 #ifdef TARGET_SPARC64
1358 // Inverted logic
1359 static const int gen_tcg_cond_reg[8] = {
1361 TCG_COND_NE,
1362 TCG_COND_GT,
1363 TCG_COND_GE,
1365 TCG_COND_EQ,
1366 TCG_COND_LE,
1367 TCG_COND_LT,
/* Build a DisasCompare testing register R_SRC against zero per the
   BPr condition COND.  R_SRC is a global, so mark g1 to keep
   free_compare from releasing it.  */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    /* Table stores the inverted condition; invert back to the real one.  */
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}
/* Evaluate BPr condition COND on R_SRC and leave a 0/1 boolean in r_dst.  */
static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst. */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
1390 #endif
/* Translate an integer conditional branch (Bicc/BPcc).  OFFSET is the
   sign-extended, pre-shifted displacement; CC selects icc (0) or xcc (1).
   The annul bit (insn bit 29) kills the delay slot on untaken (or on
   taken for the unconditional cases).  */
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* In 32-bit address-mask mode, branch targets wrap to 32 bits.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* Annulled: skip the delay slot entirely.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* Annulled: jump directly, delay slot not executed.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        /* Real conditional branch: materialise the condition and emit
           the taken/untaken control flow.  */
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
/* Translate a floating-point conditional branch (FBfcc/FBPfcc).
   Same structure as do_branch, but the condition comes from fcc[CC].  */
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* In 32-bit address-mask mode, branch targets wrap to 32 bits.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* Annulled: skip the delay slot entirely.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* Annulled: jump directly, delay slot not executed.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
1472 #ifdef TARGET_SPARC64
/* Translate a V9 branch-on-register-contents (BPr) against R_REG.  */
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    /* In 32-bit address-mask mode, branch targets wrap to 32 bits.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
/* FCMPs: compare single-precision RS1/RS2, updating fcc[FCCNO] via helper.  */
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* FCMPd: compare double-precision RS1/RS2, updating fcc[FCCNO] via helper.  */
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* FCMPq: compare the quad operands staged in QT0/QT1, updating fcc[FCCNO].  */
static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}
/* FCMPEs: single-precision compare, signalling on unordered operands.  */
static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* FCMPEd: double-precision compare, signalling on unordered operands.  */
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* FCMPEq: quad compare on QT0/QT1, signalling on unordered operands.  */
static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
1599 #else
/* Pre-V9 variants: only one fcc field exists, so FCCNO is ignored and
   every compare goes through the fcc0 helper.  */
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
1630 #endif
/* Raise a TT_FP_EXCP trap after setting FSR.ftt to FSR_FLAGS
   (old ftt bits are cleared first).  */
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
/* If the FPU is disabled, raise TT_NFPU_INSN and end the TB.
   Returns nonzero when the trap was emitted (caller must stop
   translating this insn).  Always returns 0 for user-mode builds.  */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}
/* Clear the IEEE current-exception (cexc) and ftt fields of the FSR.  */
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
/* Emit GEN(dst, env, src) for a unary single-precision op that may
   raise FP exceptions (hence the env argument).  */
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

/* As gen_fop_FF, but for ops that cannot trap ("ne" = no exception).  */
static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

/* Binary single-precision op through env (may raise FP exceptions).  */
static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
1705 #ifdef TARGET_SPARC64
/* Binary single-precision op that cannot trap (VIS etc.).  */
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
1719 #endif
/* Unary double-precision op through env (may raise FP exceptions).  */
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1734 #ifdef TARGET_SPARC64
/* Unary double-precision op that cannot trap.  */
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
1747 #endif
/* Binary double-precision op through env (may raise FP exceptions).  */
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
1763 #ifdef TARGET_SPARC64
/* Binary double-precision op that cannot trap.  */
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* Binary double-precision VIS op that also reads the GSR.  */
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* Ternary op: GEN also reads the old value of RD (e.g. VIS pdist).  */
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
1806 #endif
/* Unary quad op: stage source in QT1, result comes back in QT0.  */
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1819 #ifdef TARGET_SPARC64
/* Unary quad op that cannot trap; same QT1-in/QT0-out staging.  */
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1830 #endif
/* Binary quad op: operands staged in QT0/QT1, result in QT0.  */
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* Widening op: two single-precision sources, double-precision result.  */
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* Widening op: two double-precision sources, quad result via QT0.  */
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1873 #ifdef TARGET_SPARC64
/* Conversion: single-precision source to double-precision result,
   through env (may raise FP exceptions).  */
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1887 #endif
/* Single-to-double conversion that cannot trap (still takes env
   because the helper signature requires it).  */
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

/* Conversion: double-precision source, single-precision result.  */
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

/* Conversion: quad source (staged in QT1), single-precision result.  */
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

/* Conversion: quad source (staged in QT1), double-precision result.  */
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

/* Conversion: single-precision source, quad result via QT0.  */
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* Conversion: double-precision source, quad result via QT0.  */
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1969 /* asi moves */
1970 #ifdef TARGET_SPARC64
/* Return the ASI for a memory access as a fresh TCGv_i32: the current
   %asi register for immediate-form insns, otherwise the 8-bit ASI
   field encoded in the instruction.  Caller frees the temporary.  */
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
/* Load SIZE bytes (sign-extended if SIGN) from ADDR in the ASI
   selected by INSN into DST, via the ld_asi helper.  */
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* Store SIZE bytes from SRC to ADDR in the ASI selected by INSN.  */
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* FP register load from an alternate space: the helper writes
   directly into FP register RD.  */
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

/* FP register store to an alternate space, mirror of gen_ldf_asi.  */
static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* SWAPA (V9): load the old 32-bit word at ADDR into DST, then store
   SRC there, both in the ASI selected by INSN.  */
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
/* LDDA (V9): the helper performs the doubleword load and writes the
   register pair itself; HI is unused here but kept for interface
   parity with the 32-bit implementation below.  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

/* STDA (V9): concatenate the even/odd register pair (rd+1 is the low
   word) into one 64-bit value and store it through the helper.  */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
/* CASXA: 64-bit compare-and-swap at ADDR; compares r[rd] with memory,
   conditionally stores VAL2, and writes the old memory value to r[rd].  */
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
2094 #elif !defined(CONFIG_USER_ONLY)
/* 32-bit (sparc32) variant: the ASI is always the immediate field of
   the insn; the helper returns 64 bits which are truncated to DST.  */
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
/* 32-bit variant: widen SRC to 64 bits and store SIZE bytes through
   the st_asi helper using the insn's immediate ASI field.  */
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2127 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2129 TCGv_i32 r_asi, r_size, r_sign;
2130 TCGv_i64 r_val, t64;
2132 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2133 r_size = tcg_const_i32(4);
2134 r_sign = tcg_const_i32(0);
2135 t64 = tcg_temp_new_i64();
2136 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2137 tcg_temp_free(r_sign);
2138 r_val = tcg_temp_new_i64();
2139 tcg_gen_extu_tl_i64(r_val, src);
2140 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2141 tcg_temp_free_i64(r_val);
2142 tcg_temp_free_i32(r_size);
2143 tcg_temp_free_i32(r_asi);
2144 tcg_gen_trunc_i64_tl(dst, t64);
2145 tcg_temp_free_i64(t64);
/* LDDA (sparc32): load one 64-bit doubleword and split it across the
   register pair -- low half to r[rd+1], high half to r[rd].  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    /* Low 32 bits go to the odd register of the pair.  */
    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    /* High 32 bits go to the even register.  */
    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
/* STDA (sparc32): concatenate the register pair (rd+1 low, rd high)
   into 64 bits and store through the helper.  */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2189 #endif
2191 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
/* CASA: 32-bit compare-and-swap at ADDR; compares r[rd] with memory,
   conditionally stores VAL2, and writes the old value back to r[rd].
   The ASI source differs between V9 (%asi or immediate) and V8.  */
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
/* LDSTUBA: load the byte at ADDR into DST, then store 0xff there
   (load-store unsigned byte, the classic SPARC lock primitive).  */
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
2223 #endif
/* Fetch the rs1 operand register of INSN.  */
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}
/* Fetch the second operand of INSN: either the sign-extended simm13
   immediate (materialised in a temp) or the rs2 register.  */
static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
2244 #ifdef TARGET_SPARC64
/* FMOVScc/FMOVSr: conditionally move single-precision f[rs] to f[rd]
   according to the prepared comparison CMP.  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    /* dst = (c32 != 0) ? s1 : s2 -- i.e. keep old value when false.  */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
/* FMOVDcc/FMOVDr: conditional double-precision register move.  */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
/* FMOVQcc/FMOVQr: conditional quad move as two 64-bit movconds over
   the backing cpu_fpr halves; mark the destination quad dirty.  */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
2296 #ifndef CONFIG_USER_ONLY
/* Compute a pointer to the current trap state: r_tsptr =
   &env->ts[env->tl & MAXTL_MASK].  (The parameter deliberately
   shadows the file-scope cpu_env global with the same value.)  */
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
2321 #endif
/* VIS EDGE instructions: compute the edge mask for partial-store
   boundaries of WIDTH-bit elements.  CC additionally sets the integer
   condition codes as for subcc(s1, s2); LEFT selects the left-edge
   variants.  NOTE(review): s1/s2 are modified in place by the address
   masking below -- callers apparently pass dead/temporary values.  */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* EDGE..CC also performs subcc(s1, s2) on the integer ccs.  */
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    /* Look up both edge masks: dst gets the left/lo1 mask.  */
    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /* Compare the doubleword-aligned addresses (32-bit wrapped in
       address-mask mode).  */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
/* VIS ALIGNADDR{,L}: dst = (s1 + s2) & ~7, and deposit the low three
   bits of the sum (negated for the "little" variant) into GSR.align.  */
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
/* VIS FALIGNDATA: concatenate s1:s2 and extract 64 bits starting at
   the byte offset held in GSR.align.  */
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Byte offset -> bit shift.  */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1. */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
2464 #endif
/* Bail out of disas_sparc_insn when the CPU model lacks FEATURE:
   integer-unit features fault as illegal insns, FPU features as
   no-FPU traps.  Both expand to a naked goto, so they are only valid
   inside disas_sparc_insn where those labels exist.  */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
2473 /* before an instruction, dc->pc must be static */
2474 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2476 unsigned int opc, rs1, rs2, rd;
2477 TCGv cpu_src1, cpu_src2;
2478 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2479 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2480 target_long simm;
2482 opc = GET_FIELD(insn, 0, 1);
2483 rd = GET_FIELD(insn, 2, 6);
2485 switch (opc) {
2486 case 0: /* branches/sethi */
2488 unsigned int xop = GET_FIELD(insn, 7, 9);
2489 int32_t target;
2490 switch (xop) {
2491 #ifdef TARGET_SPARC64
2492 case 0x1: /* V9 BPcc */
2494 int cc;
2496 target = GET_FIELD_SP(insn, 0, 18);
2497 target = sign_extend(target, 19);
2498 target <<= 2;
2499 cc = GET_FIELD_SP(insn, 20, 21);
2500 if (cc == 0)
2501 do_branch(dc, target, insn, 0);
2502 else if (cc == 2)
2503 do_branch(dc, target, insn, 1);
2504 else
2505 goto illegal_insn;
2506 goto jmp_insn;
2508 case 0x3: /* V9 BPr */
2510 target = GET_FIELD_SP(insn, 0, 13) |
2511 (GET_FIELD_SP(insn, 20, 21) << 14);
2512 target = sign_extend(target, 16);
2513 target <<= 2;
2514 cpu_src1 = get_src1(dc, insn);
2515 do_branch_reg(dc, target, insn, cpu_src1);
2516 goto jmp_insn;
2518 case 0x5: /* V9 FBPcc */
2520 int cc = GET_FIELD_SP(insn, 20, 21);
2521 if (gen_trap_ifnofpu(dc)) {
2522 goto jmp_insn;
2524 target = GET_FIELD_SP(insn, 0, 18);
2525 target = sign_extend(target, 19);
2526 target <<= 2;
2527 do_fbranch(dc, target, insn, cc);
2528 goto jmp_insn;
2530 #else
2531 case 0x7: /* CBN+x */
2533 goto ncp_insn;
2535 #endif
2536 case 0x2: /* BN+x */
2538 target = GET_FIELD(insn, 10, 31);
2539 target = sign_extend(target, 22);
2540 target <<= 2;
2541 do_branch(dc, target, insn, 0);
2542 goto jmp_insn;
2544 case 0x6: /* FBN+x */
2546 if (gen_trap_ifnofpu(dc)) {
2547 goto jmp_insn;
2549 target = GET_FIELD(insn, 10, 31);
2550 target = sign_extend(target, 22);
2551 target <<= 2;
2552 do_fbranch(dc, target, insn, 0);
2553 goto jmp_insn;
2555 case 0x4: /* SETHI */
2556 /* Special-case %g0 because that's the canonical nop. */
2557 if (rd) {
2558 uint32_t value = GET_FIELD(insn, 10, 31);
2559 TCGv t = gen_dest_gpr(dc, rd);
2560 tcg_gen_movi_tl(t, value << 10);
2561 gen_store_gpr(dc, rd, t);
2563 break;
2564 case 0x0: /* UNIMPL */
2565 default:
2566 goto illegal_insn;
2568 break;
2570 break;
2571 case 1: /*CALL*/
2573 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2574 TCGv o7 = gen_dest_gpr(dc, 15);
2576 tcg_gen_movi_tl(o7, dc->pc);
2577 gen_store_gpr(dc, 15, o7);
2578 target += dc->pc;
2579 gen_mov_pc_npc(dc);
2580 #ifdef TARGET_SPARC64
2581 if (unlikely(AM_CHECK(dc))) {
2582 target &= 0xffffffffULL;
2584 #endif
2585 dc->npc = target;
2587 goto jmp_insn;
2588 case 2: /* FPU & Logical Operations */
2590 unsigned int xop = GET_FIELD(insn, 7, 12);
2591 TCGv cpu_dst = get_temp_tl(dc);
2592 TCGv cpu_tmp0;
2594 if (xop == 0x3a) { /* generate trap */
2595 int cond = GET_FIELD(insn, 3, 6);
2596 TCGv_i32 trap;
2597 TCGLabel *l1 = NULL;
2598 int mask;
2600 if (cond == 0) {
2601 /* Trap never. */
2602 break;
2605 save_state(dc);
2607 if (cond != 8) {
2608 /* Conditional trap. */
2609 DisasCompare cmp;
2610 #ifdef TARGET_SPARC64
2611 /* V9 icc/xcc */
2612 int cc = GET_FIELD_SP(insn, 11, 12);
2613 if (cc == 0) {
2614 gen_compare(&cmp, 0, cond, dc);
2615 } else if (cc == 2) {
2616 gen_compare(&cmp, 1, cond, dc);
2617 } else {
2618 goto illegal_insn;
2620 #else
2621 gen_compare(&cmp, 0, cond, dc);
2622 #endif
2623 l1 = gen_new_label();
2624 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2625 cmp.c1, cmp.c2, l1);
2626 free_compare(&cmp);
2629 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2630 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2632 /* Don't use the normal temporaries, as they may well have
2633 gone out of scope with the branch above. While we're
2634 doing that we might as well pre-truncate to 32-bit. */
2635 trap = tcg_temp_new_i32();
2637 rs1 = GET_FIELD_SP(insn, 14, 18);
2638 if (IS_IMM) {
2639 rs2 = GET_FIELD_SP(insn, 0, 6);
2640 if (rs1 == 0) {
2641 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2642 /* Signal that the trap value is fully constant. */
2643 mask = 0;
2644 } else {
2645 TCGv t1 = gen_load_gpr(dc, rs1);
2646 tcg_gen_trunc_tl_i32(trap, t1);
2647 tcg_gen_addi_i32(trap, trap, rs2);
2649 } else {
2650 TCGv t1, t2;
2651 rs2 = GET_FIELD_SP(insn, 0, 4);
2652 t1 = gen_load_gpr(dc, rs1);
2653 t2 = gen_load_gpr(dc, rs2);
2654 tcg_gen_add_tl(t1, t1, t2);
2655 tcg_gen_trunc_tl_i32(trap, t1);
2657 if (mask != 0) {
2658 tcg_gen_andi_i32(trap, trap, mask);
2659 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2662 gen_helper_raise_exception(cpu_env, trap);
2663 tcg_temp_free_i32(trap);
2665 if (cond == 8) {
2666 /* An unconditional trap ends the TB. */
2667 dc->is_br = 1;
2668 goto jmp_insn;
2669 } else {
2670 /* A conditional trap falls through to the next insn. */
2671 gen_set_label(l1);
2672 break;
2674 } else if (xop == 0x28) {
2675 rs1 = GET_FIELD(insn, 13, 17);
2676 switch(rs1) {
2677 case 0: /* rdy */
2678 #ifndef TARGET_SPARC64
2679 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2680 manual, rdy on the microSPARC
2681 II */
2682 case 0x0f: /* stbar in the SPARCv8 manual,
2683 rdy on the microSPARC II */
2684 case 0x10 ... 0x1f: /* implementation-dependent in the
2685 SPARCv8 manual, rdy on the
2686 microSPARC II */
2687 /* Read Asr17 */
2688 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2689 TCGv t = gen_dest_gpr(dc, rd);
2690 /* Read Asr17 for a Leon3 monoprocessor */
2691 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2692 gen_store_gpr(dc, rd, t);
2693 break;
2695 #endif
2696 gen_store_gpr(dc, rd, cpu_y);
2697 break;
2698 #ifdef TARGET_SPARC64
2699 case 0x2: /* V9 rdccr */
2700 update_psr(dc);
2701 gen_helper_rdccr(cpu_dst, cpu_env);
2702 gen_store_gpr(dc, rd, cpu_dst);
2703 break;
2704 case 0x3: /* V9 rdasi */
2705 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2706 gen_store_gpr(dc, rd, cpu_dst);
2707 break;
2708 case 0x4: /* V9 rdtick */
2710 TCGv_ptr r_tickptr;
2711 TCGv_i32 r_const;
2713 r_tickptr = tcg_temp_new_ptr();
2714 r_const = tcg_const_i32(dc->mem_idx);
2715 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2716 offsetof(CPUSPARCState, tick));
2717 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2718 r_const);
2719 tcg_temp_free_ptr(r_tickptr);
2720 tcg_temp_free_i32(r_const);
2721 gen_store_gpr(dc, rd, cpu_dst);
2723 break;
2724 case 0x5: /* V9 rdpc */
2726 TCGv t = gen_dest_gpr(dc, rd);
2727 if (unlikely(AM_CHECK(dc))) {
2728 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2729 } else {
2730 tcg_gen_movi_tl(t, dc->pc);
2732 gen_store_gpr(dc, rd, t);
2734 break;
2735 case 0x6: /* V9 rdfprs */
2736 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2737 gen_store_gpr(dc, rd, cpu_dst);
2738 break;
2739 case 0xf: /* V9 membar */
2740 break; /* no effect */
2741 case 0x13: /* Graphics Status */
2742 if (gen_trap_ifnofpu(dc)) {
2743 goto jmp_insn;
2745 gen_store_gpr(dc, rd, cpu_gsr);
2746 break;
2747 case 0x16: /* Softint */
2748 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2749 gen_store_gpr(dc, rd, cpu_dst);
2750 break;
2751 case 0x17: /* Tick compare */
2752 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2753 break;
2754 case 0x18: /* System tick */
2756 TCGv_ptr r_tickptr;
2757 TCGv_i32 r_const;
2759 r_tickptr = tcg_temp_new_ptr();
2760 r_const = tcg_const_i32(dc->mem_idx);
2761 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2762 offsetof(CPUSPARCState, stick));
2763 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2764 r_const);
2765 tcg_temp_free_ptr(r_tickptr);
2766 tcg_temp_free_i32(r_const);
2767 gen_store_gpr(dc, rd, cpu_dst);
2769 break;
2770 case 0x19: /* System tick compare */
2771 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2772 break;
2773 case 0x10: /* Performance Control */
2774 case 0x11: /* Performance Instrumentation Counter */
2775 case 0x12: /* Dispatch Control */
2776 case 0x14: /* Softint set, WO */
2777 case 0x15: /* Softint clear, WO */
2778 #endif
2779 default:
2780 goto illegal_insn;
2782 #if !defined(CONFIG_USER_ONLY)
2783 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2784 #ifndef TARGET_SPARC64
2785 if (!supervisor(dc)) {
2786 goto priv_insn;
2788 update_psr(dc);
2789 gen_helper_rdpsr(cpu_dst, cpu_env);
2790 #else
2791 CHECK_IU_FEATURE(dc, HYPV);
2792 if (!hypervisor(dc))
2793 goto priv_insn;
2794 rs1 = GET_FIELD(insn, 13, 17);
2795 switch (rs1) {
2796 case 0: // hpstate
2797 // gen_op_rdhpstate();
2798 break;
2799 case 1: // htstate
2800 // gen_op_rdhtstate();
2801 break;
2802 case 3: // hintp
2803 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2804 break;
2805 case 5: // htba
2806 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2807 break;
2808 case 6: // hver
2809 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2810 break;
2811 case 31: // hstick_cmpr
2812 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2813 break;
2814 default:
2815 goto illegal_insn;
2817 #endif
2818 gen_store_gpr(dc, rd, cpu_dst);
2819 break;
2820 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2821 if (!supervisor(dc)) {
2822 goto priv_insn;
2824 cpu_tmp0 = get_temp_tl(dc);
2825 #ifdef TARGET_SPARC64
2826 rs1 = GET_FIELD(insn, 13, 17);
2827 switch (rs1) {
2828 case 0: // tpc
2830 TCGv_ptr r_tsptr;
2832 r_tsptr = tcg_temp_new_ptr();
2833 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2834 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2835 offsetof(trap_state, tpc));
2836 tcg_temp_free_ptr(r_tsptr);
2838 break;
2839 case 1: // tnpc
2841 TCGv_ptr r_tsptr;
2843 r_tsptr = tcg_temp_new_ptr();
2844 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2845 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2846 offsetof(trap_state, tnpc));
2847 tcg_temp_free_ptr(r_tsptr);
2849 break;
2850 case 2: // tstate
2852 TCGv_ptr r_tsptr;
2854 r_tsptr = tcg_temp_new_ptr();
2855 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2856 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2857 offsetof(trap_state, tstate));
2858 tcg_temp_free_ptr(r_tsptr);
2860 break;
2861 case 3: // tt
2863 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2865 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2866 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2867 offsetof(trap_state, tt));
2868 tcg_temp_free_ptr(r_tsptr);
2870 break;
2871 case 4: // tick
2873 TCGv_ptr r_tickptr;
2874 TCGv_i32 r_const;
2876 r_tickptr = tcg_temp_new_ptr();
2877 r_const = tcg_const_i32(dc->mem_idx);
2878 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2879 offsetof(CPUSPARCState, tick));
2880 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2881 r_tickptr, r_const);
2882 tcg_temp_free_ptr(r_tickptr);
2883 tcg_temp_free_i32(r_const);
2885 break;
2886 case 5: // tba
2887 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2888 break;
2889 case 6: // pstate
2890 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2891 offsetof(CPUSPARCState, pstate));
2892 break;
2893 case 7: // tl
2894 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2895 offsetof(CPUSPARCState, tl));
2896 break;
2897 case 8: // pil
2898 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2899 offsetof(CPUSPARCState, psrpil));
2900 break;
2901 case 9: // cwp
2902 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2903 break;
2904 case 10: // cansave
2905 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2906 offsetof(CPUSPARCState, cansave));
2907 break;
2908 case 11: // canrestore
2909 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2910 offsetof(CPUSPARCState, canrestore));
2911 break;
2912 case 12: // cleanwin
2913 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2914 offsetof(CPUSPARCState, cleanwin));
2915 break;
2916 case 13: // otherwin
2917 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2918 offsetof(CPUSPARCState, otherwin));
2919 break;
2920 case 14: // wstate
2921 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2922 offsetof(CPUSPARCState, wstate));
2923 break;
2924 case 16: // UA2005 gl
2925 CHECK_IU_FEATURE(dc, GL);
2926 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2927 offsetof(CPUSPARCState, gl));
2928 break;
2929 case 26: // UA2005 strand status
2930 CHECK_IU_FEATURE(dc, HYPV);
2931 if (!hypervisor(dc))
2932 goto priv_insn;
2933 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2934 break;
2935 case 31: // ver
2936 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2937 break;
2938 case 15: // fq
2939 default:
2940 goto illegal_insn;
2942 #else
2943 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2944 #endif
2945 gen_store_gpr(dc, rd, cpu_tmp0);
2946 break;
2947 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2948 #ifdef TARGET_SPARC64
2949 save_state(dc);
2950 gen_helper_flushw(cpu_env);
2951 #else
2952 if (!supervisor(dc))
2953 goto priv_insn;
2954 gen_store_gpr(dc, rd, cpu_tbr);
2955 #endif
2956 break;
2957 #endif
2958 } else if (xop == 0x34) { /* FPU Operations */
2959 if (gen_trap_ifnofpu(dc)) {
2960 goto jmp_insn;
2962 gen_op_clear_ieee_excp_and_FTT();
2963 rs1 = GET_FIELD(insn, 13, 17);
2964 rs2 = GET_FIELD(insn, 27, 31);
2965 xop = GET_FIELD(insn, 18, 26);
2966 save_state(dc);
2967 switch (xop) {
2968 case 0x1: /* fmovs */
2969 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2970 gen_store_fpr_F(dc, rd, cpu_src1_32);
2971 break;
2972 case 0x5: /* fnegs */
2973 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2974 break;
2975 case 0x9: /* fabss */
2976 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2977 break;
2978 case 0x29: /* fsqrts */
2979 CHECK_FPU_FEATURE(dc, FSQRT);
2980 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2981 break;
2982 case 0x2a: /* fsqrtd */
2983 CHECK_FPU_FEATURE(dc, FSQRT);
2984 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2985 break;
2986 case 0x2b: /* fsqrtq */
2987 CHECK_FPU_FEATURE(dc, FLOAT128);
2988 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2989 break;
2990 case 0x41: /* fadds */
2991 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2992 break;
2993 case 0x42: /* faddd */
2994 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2995 break;
2996 case 0x43: /* faddq */
2997 CHECK_FPU_FEATURE(dc, FLOAT128);
2998 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2999 break;
3000 case 0x45: /* fsubs */
3001 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3002 break;
3003 case 0x46: /* fsubd */
3004 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3005 break;
3006 case 0x47: /* fsubq */
3007 CHECK_FPU_FEATURE(dc, FLOAT128);
3008 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3009 break;
3010 case 0x49: /* fmuls */
3011 CHECK_FPU_FEATURE(dc, FMUL);
3012 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3013 break;
3014 case 0x4a: /* fmuld */
3015 CHECK_FPU_FEATURE(dc, FMUL);
3016 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3017 break;
3018 case 0x4b: /* fmulq */
3019 CHECK_FPU_FEATURE(dc, FLOAT128);
3020 CHECK_FPU_FEATURE(dc, FMUL);
3021 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3022 break;
3023 case 0x4d: /* fdivs */
3024 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3025 break;
3026 case 0x4e: /* fdivd */
3027 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3028 break;
3029 case 0x4f: /* fdivq */
3030 CHECK_FPU_FEATURE(dc, FLOAT128);
3031 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3032 break;
3033 case 0x69: /* fsmuld */
3034 CHECK_FPU_FEATURE(dc, FSMULD);
3035 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3036 break;
3037 case 0x6e: /* fdmulq */
3038 CHECK_FPU_FEATURE(dc, FLOAT128);
3039 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3040 break;
3041 case 0xc4: /* fitos */
3042 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3043 break;
3044 case 0xc6: /* fdtos */
3045 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3046 break;
3047 case 0xc7: /* fqtos */
3048 CHECK_FPU_FEATURE(dc, FLOAT128);
3049 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3050 break;
3051 case 0xc8: /* fitod */
3052 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3053 break;
3054 case 0xc9: /* fstod */
3055 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3056 break;
3057 case 0xcb: /* fqtod */
3058 CHECK_FPU_FEATURE(dc, FLOAT128);
3059 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3060 break;
3061 case 0xcc: /* fitoq */
3062 CHECK_FPU_FEATURE(dc, FLOAT128);
3063 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3064 break;
3065 case 0xcd: /* fstoq */
3066 CHECK_FPU_FEATURE(dc, FLOAT128);
3067 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3068 break;
3069 case 0xce: /* fdtoq */
3070 CHECK_FPU_FEATURE(dc, FLOAT128);
3071 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3072 break;
3073 case 0xd1: /* fstoi */
3074 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3075 break;
3076 case 0xd2: /* fdtoi */
3077 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3078 break;
3079 case 0xd3: /* fqtoi */
3080 CHECK_FPU_FEATURE(dc, FLOAT128);
3081 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3082 break;
3083 #ifdef TARGET_SPARC64
3084 case 0x2: /* V9 fmovd */
3085 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3086 gen_store_fpr_D(dc, rd, cpu_src1_64);
3087 break;
3088 case 0x3: /* V9 fmovq */
3089 CHECK_FPU_FEATURE(dc, FLOAT128);
3090 gen_move_Q(rd, rs2);
3091 break;
3092 case 0x6: /* V9 fnegd */
3093 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3094 break;
3095 case 0x7: /* V9 fnegq */
3096 CHECK_FPU_FEATURE(dc, FLOAT128);
3097 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3098 break;
3099 case 0xa: /* V9 fabsd */
3100 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3101 break;
3102 case 0xb: /* V9 fabsq */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3105 break;
3106 case 0x81: /* V9 fstox */
3107 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3108 break;
3109 case 0x82: /* V9 fdtox */
3110 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3111 break;
3112 case 0x83: /* V9 fqtox */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3115 break;
3116 case 0x84: /* V9 fxtos */
3117 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3118 break;
3119 case 0x88: /* V9 fxtod */
3120 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3121 break;
3122 case 0x8c: /* V9 fxtoq */
3123 CHECK_FPU_FEATURE(dc, FLOAT128);
3124 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3125 break;
3126 #endif
3127 default:
3128 goto illegal_insn;
3130 } else if (xop == 0x35) { /* FPU Operations */
3131 #ifdef TARGET_SPARC64
3132 int cond;
3133 #endif
3134 if (gen_trap_ifnofpu(dc)) {
3135 goto jmp_insn;
3137 gen_op_clear_ieee_excp_and_FTT();
3138 rs1 = GET_FIELD(insn, 13, 17);
3139 rs2 = GET_FIELD(insn, 27, 31);
3140 xop = GET_FIELD(insn, 18, 26);
3141 save_state(dc);
3143 #ifdef TARGET_SPARC64
3144 #define FMOVR(sz) \
3145 do { \
3146 DisasCompare cmp; \
3147 cond = GET_FIELD_SP(insn, 10, 12); \
3148 cpu_src1 = get_src1(dc, insn); \
3149 gen_compare_reg(&cmp, cond, cpu_src1); \
3150 gen_fmov##sz(dc, &cmp, rd, rs2); \
3151 free_compare(&cmp); \
3152 } while (0)
3154 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3155 FMOVR(s);
3156 break;
3157 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3158 FMOVR(d);
3159 break;
3160 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3161 CHECK_FPU_FEATURE(dc, FLOAT128);
3162 FMOVR(q);
3163 break;
3165 #undef FMOVR
3166 #endif
3167 switch (xop) {
3168 #ifdef TARGET_SPARC64
3169 #define FMOVCC(fcc, sz) \
3170 do { \
3171 DisasCompare cmp; \
3172 cond = GET_FIELD_SP(insn, 14, 17); \
3173 gen_fcompare(&cmp, fcc, cond); \
3174 gen_fmov##sz(dc, &cmp, rd, rs2); \
3175 free_compare(&cmp); \
3176 } while (0)
3178 case 0x001: /* V9 fmovscc %fcc0 */
3179 FMOVCC(0, s);
3180 break;
3181 case 0x002: /* V9 fmovdcc %fcc0 */
3182 FMOVCC(0, d);
3183 break;
3184 case 0x003: /* V9 fmovqcc %fcc0 */
3185 CHECK_FPU_FEATURE(dc, FLOAT128);
3186 FMOVCC(0, q);
3187 break;
3188 case 0x041: /* V9 fmovscc %fcc1 */
3189 FMOVCC(1, s);
3190 break;
3191 case 0x042: /* V9 fmovdcc %fcc1 */
3192 FMOVCC(1, d);
3193 break;
3194 case 0x043: /* V9 fmovqcc %fcc1 */
3195 CHECK_FPU_FEATURE(dc, FLOAT128);
3196 FMOVCC(1, q);
3197 break;
3198 case 0x081: /* V9 fmovscc %fcc2 */
3199 FMOVCC(2, s);
3200 break;
3201 case 0x082: /* V9 fmovdcc %fcc2 */
3202 FMOVCC(2, d);
3203 break;
3204 case 0x083: /* V9 fmovqcc %fcc2 */
3205 CHECK_FPU_FEATURE(dc, FLOAT128);
3206 FMOVCC(2, q);
3207 break;
3208 case 0x0c1: /* V9 fmovscc %fcc3 */
3209 FMOVCC(3, s);
3210 break;
3211 case 0x0c2: /* V9 fmovdcc %fcc3 */
3212 FMOVCC(3, d);
3213 break;
3214 case 0x0c3: /* V9 fmovqcc %fcc3 */
3215 CHECK_FPU_FEATURE(dc, FLOAT128);
3216 FMOVCC(3, q);
3217 break;
3218 #undef FMOVCC
3219 #define FMOVCC(xcc, sz) \
3220 do { \
3221 DisasCompare cmp; \
3222 cond = GET_FIELD_SP(insn, 14, 17); \
3223 gen_compare(&cmp, xcc, cond, dc); \
3224 gen_fmov##sz(dc, &cmp, rd, rs2); \
3225 free_compare(&cmp); \
3226 } while (0)
3228 case 0x101: /* V9 fmovscc %icc */
3229 FMOVCC(0, s);
3230 break;
3231 case 0x102: /* V9 fmovdcc %icc */
3232 FMOVCC(0, d);
3233 break;
3234 case 0x103: /* V9 fmovqcc %icc */
3235 CHECK_FPU_FEATURE(dc, FLOAT128);
3236 FMOVCC(0, q);
3237 break;
3238 case 0x181: /* V9 fmovscc %xcc */
3239 FMOVCC(1, s);
3240 break;
3241 case 0x182: /* V9 fmovdcc %xcc */
3242 FMOVCC(1, d);
3243 break;
3244 case 0x183: /* V9 fmovqcc %xcc */
3245 CHECK_FPU_FEATURE(dc, FLOAT128);
3246 FMOVCC(1, q);
3247 break;
3248 #undef FMOVCC
3249 #endif
3250 case 0x51: /* fcmps, V9 %fcc */
3251 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3252 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3253 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3254 break;
3255 case 0x52: /* fcmpd, V9 %fcc */
3256 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3257 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3258 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3259 break;
3260 case 0x53: /* fcmpq, V9 %fcc */
3261 CHECK_FPU_FEATURE(dc, FLOAT128);
3262 gen_op_load_fpr_QT0(QFPREG(rs1));
3263 gen_op_load_fpr_QT1(QFPREG(rs2));
3264 gen_op_fcmpq(rd & 3);
3265 break;
3266 case 0x55: /* fcmpes, V9 %fcc */
3267 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3268 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3269 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3270 break;
3271 case 0x56: /* fcmped, V9 %fcc */
3272 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3273 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3274 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3275 break;
3276 case 0x57: /* fcmpeq, V9 %fcc */
3277 CHECK_FPU_FEATURE(dc, FLOAT128);
3278 gen_op_load_fpr_QT0(QFPREG(rs1));
3279 gen_op_load_fpr_QT1(QFPREG(rs2));
3280 gen_op_fcmpeq(rd & 3);
3281 break;
3282 default:
3283 goto illegal_insn;
3285 } else if (xop == 0x2) {
3286 TCGv dst = gen_dest_gpr(dc, rd);
3287 rs1 = GET_FIELD(insn, 13, 17);
3288 if (rs1 == 0) {
3289 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3290 if (IS_IMM) { /* immediate */
3291 simm = GET_FIELDs(insn, 19, 31);
3292 tcg_gen_movi_tl(dst, simm);
3293 gen_store_gpr(dc, rd, dst);
3294 } else { /* register */
3295 rs2 = GET_FIELD(insn, 27, 31);
3296 if (rs2 == 0) {
3297 tcg_gen_movi_tl(dst, 0);
3298 gen_store_gpr(dc, rd, dst);
3299 } else {
3300 cpu_src2 = gen_load_gpr(dc, rs2);
3301 gen_store_gpr(dc, rd, cpu_src2);
3304 } else {
3305 cpu_src1 = get_src1(dc, insn);
3306 if (IS_IMM) { /* immediate */
3307 simm = GET_FIELDs(insn, 19, 31);
3308 tcg_gen_ori_tl(dst, cpu_src1, simm);
3309 gen_store_gpr(dc, rd, dst);
3310 } else { /* register */
3311 rs2 = GET_FIELD(insn, 27, 31);
3312 if (rs2 == 0) {
3313 /* mov shortcut: or x, %g0, y -> mov x, y */
3314 gen_store_gpr(dc, rd, cpu_src1);
3315 } else {
3316 cpu_src2 = gen_load_gpr(dc, rs2);
3317 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3318 gen_store_gpr(dc, rd, dst);
3322 #ifdef TARGET_SPARC64
3323 } else if (xop == 0x25) { /* sll, V9 sllx */
3324 cpu_src1 = get_src1(dc, insn);
3325 if (IS_IMM) { /* immediate */
3326 simm = GET_FIELDs(insn, 20, 31);
3327 if (insn & (1 << 12)) {
3328 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3329 } else {
3330 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3332 } else { /* register */
3333 rs2 = GET_FIELD(insn, 27, 31);
3334 cpu_src2 = gen_load_gpr(dc, rs2);
3335 cpu_tmp0 = get_temp_tl(dc);
3336 if (insn & (1 << 12)) {
3337 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3338 } else {
3339 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3341 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3343 gen_store_gpr(dc, rd, cpu_dst);
3344 } else if (xop == 0x26) { /* srl, V9 srlx */
3345 cpu_src1 = get_src1(dc, insn);
3346 if (IS_IMM) { /* immediate */
3347 simm = GET_FIELDs(insn, 20, 31);
3348 if (insn & (1 << 12)) {
3349 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3350 } else {
3351 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3352 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3354 } else { /* register */
3355 rs2 = GET_FIELD(insn, 27, 31);
3356 cpu_src2 = gen_load_gpr(dc, rs2);
3357 cpu_tmp0 = get_temp_tl(dc);
3358 if (insn & (1 << 12)) {
3359 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3360 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3361 } else {
3362 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3363 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3364 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3367 gen_store_gpr(dc, rd, cpu_dst);
3368 } else if (xop == 0x27) { /* sra, V9 srax */
3369 cpu_src1 = get_src1(dc, insn);
3370 if (IS_IMM) { /* immediate */
3371 simm = GET_FIELDs(insn, 20, 31);
3372 if (insn & (1 << 12)) {
3373 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3374 } else {
3375 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3376 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3378 } else { /* register */
3379 rs2 = GET_FIELD(insn, 27, 31);
3380 cpu_src2 = gen_load_gpr(dc, rs2);
3381 cpu_tmp0 = get_temp_tl(dc);
3382 if (insn & (1 << 12)) {
3383 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3384 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3385 } else {
3386 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3387 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3388 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3391 gen_store_gpr(dc, rd, cpu_dst);
3392 #endif
3393 } else if (xop < 0x36) {
3394 if (xop < 0x20) {
3395 cpu_src1 = get_src1(dc, insn);
3396 cpu_src2 = get_src2(dc, insn);
3397 switch (xop & ~0x10) {
3398 case 0x0: /* add */
3399 if (xop & 0x10) {
3400 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3401 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3402 dc->cc_op = CC_OP_ADD;
3403 } else {
3404 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3406 break;
3407 case 0x1: /* and */
3408 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3409 if (xop & 0x10) {
3410 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3411 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3412 dc->cc_op = CC_OP_LOGIC;
3414 break;
3415 case 0x2: /* or */
3416 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3417 if (xop & 0x10) {
3418 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3419 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3420 dc->cc_op = CC_OP_LOGIC;
3422 break;
3423 case 0x3: /* xor */
3424 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3425 if (xop & 0x10) {
3426 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3427 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3428 dc->cc_op = CC_OP_LOGIC;
3430 break;
3431 case 0x4: /* sub */
3432 if (xop & 0x10) {
3433 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3434 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3435 dc->cc_op = CC_OP_SUB;
3436 } else {
3437 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3439 break;
3440 case 0x5: /* andn */
3441 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3442 if (xop & 0x10) {
3443 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3444 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3445 dc->cc_op = CC_OP_LOGIC;
3447 break;
3448 case 0x6: /* orn */
3449 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3450 if (xop & 0x10) {
3451 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3452 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3453 dc->cc_op = CC_OP_LOGIC;
3455 break;
3456 case 0x7: /* xorn */
3457 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3458 if (xop & 0x10) {
3459 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3460 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3461 dc->cc_op = CC_OP_LOGIC;
3463 break;
3464 case 0x8: /* addx, V9 addc */
3465 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3466 (xop & 0x10));
3467 break;
3468 #ifdef TARGET_SPARC64
3469 case 0x9: /* V9 mulx */
3470 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3471 break;
3472 #endif
3473 case 0xa: /* umul */
3474 CHECK_IU_FEATURE(dc, MUL);
3475 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3476 if (xop & 0x10) {
3477 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3478 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3479 dc->cc_op = CC_OP_LOGIC;
3481 break;
3482 case 0xb: /* smul */
3483 CHECK_IU_FEATURE(dc, MUL);
3484 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3485 if (xop & 0x10) {
3486 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3487 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3488 dc->cc_op = CC_OP_LOGIC;
3490 break;
3491 case 0xc: /* subx, V9 subc */
3492 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3493 (xop & 0x10));
3494 break;
3495 #ifdef TARGET_SPARC64
3496 case 0xd: /* V9 udivx */
3497 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3498 break;
3499 #endif
3500 case 0xe: /* udiv */
3501 CHECK_IU_FEATURE(dc, DIV);
3502 if (xop & 0x10) {
3503 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3504 cpu_src2);
3505 dc->cc_op = CC_OP_DIV;
3506 } else {
3507 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3508 cpu_src2);
3510 break;
3511 case 0xf: /* sdiv */
3512 CHECK_IU_FEATURE(dc, DIV);
3513 if (xop & 0x10) {
3514 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3515 cpu_src2);
3516 dc->cc_op = CC_OP_DIV;
3517 } else {
3518 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3519 cpu_src2);
3521 break;
3522 default:
3523 goto illegal_insn;
3525 gen_store_gpr(dc, rd, cpu_dst);
3526 } else {
3527 cpu_src1 = get_src1(dc, insn);
3528 cpu_src2 = get_src2(dc, insn);
3529 switch (xop) {
3530 case 0x20: /* taddcc */
3531 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3532 gen_store_gpr(dc, rd, cpu_dst);
3533 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3534 dc->cc_op = CC_OP_TADD;
3535 break;
3536 case 0x21: /* tsubcc */
3537 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3538 gen_store_gpr(dc, rd, cpu_dst);
3539 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3540 dc->cc_op = CC_OP_TSUB;
3541 break;
3542 case 0x22: /* taddcctv */
3543 gen_helper_taddcctv(cpu_dst, cpu_env,
3544 cpu_src1, cpu_src2);
3545 gen_store_gpr(dc, rd, cpu_dst);
3546 dc->cc_op = CC_OP_TADDTV;
3547 break;
3548 case 0x23: /* tsubcctv */
3549 gen_helper_tsubcctv(cpu_dst, cpu_env,
3550 cpu_src1, cpu_src2);
3551 gen_store_gpr(dc, rd, cpu_dst);
3552 dc->cc_op = CC_OP_TSUBTV;
3553 break;
3554 case 0x24: /* mulscc */
3555 update_psr(dc);
3556 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3557 gen_store_gpr(dc, rd, cpu_dst);
3558 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3559 dc->cc_op = CC_OP_ADD;
3560 break;
3561 #ifndef TARGET_SPARC64
3562 case 0x25: /* sll */
3563 if (IS_IMM) { /* immediate */
3564 simm = GET_FIELDs(insn, 20, 31);
3565 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3566 } else { /* register */
3567 cpu_tmp0 = get_temp_tl(dc);
3568 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3569 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3571 gen_store_gpr(dc, rd, cpu_dst);
3572 break;
3573 case 0x26: /* srl */
3574 if (IS_IMM) { /* immediate */
3575 simm = GET_FIELDs(insn, 20, 31);
3576 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3577 } else { /* register */
3578 cpu_tmp0 = get_temp_tl(dc);
3579 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3580 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3582 gen_store_gpr(dc, rd, cpu_dst);
3583 break;
3584 case 0x27: /* sra */
3585 if (IS_IMM) { /* immediate */
3586 simm = GET_FIELDs(insn, 20, 31);
3587 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3588 } else { /* register */
3589 cpu_tmp0 = get_temp_tl(dc);
3590 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3591 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3593 gen_store_gpr(dc, rd, cpu_dst);
3594 break;
3595 #endif
3596 case 0x30:
3598 cpu_tmp0 = get_temp_tl(dc);
3599 switch(rd) {
3600 case 0: /* wry */
3601 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3602 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3603 break;
3604 #ifndef TARGET_SPARC64
3605 case 0x01 ... 0x0f: /* undefined in the
3606 SPARCv8 manual, nop
3607 on the microSPARC
3608 II */
3609 case 0x10 ... 0x1f: /* implementation-dependent
3610 in the SPARCv8
3611 manual, nop on the
3612 microSPARC II */
3613 if ((rd == 0x13) && (dc->def->features &
3614 CPU_FEATURE_POWERDOWN)) {
3615 /* LEON3 power-down */
3616 save_state(dc);
3617 gen_helper_power_down(cpu_env);
3619 break;
3620 #else
3621 case 0x2: /* V9 wrccr */
3622 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3623 gen_helper_wrccr(cpu_env, cpu_tmp0);
3624 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3625 dc->cc_op = CC_OP_FLAGS;
3626 break;
3627 case 0x3: /* V9 wrasi */
3628 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3629 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3630 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3631 break;
3632 case 0x6: /* V9 wrfprs */
3633 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3634 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3635 save_state(dc);
3636 gen_op_next_insn();
3637 tcg_gen_exit_tb(0);
3638 dc->is_br = 1;
3639 break;
3640 case 0xf: /* V9 sir, nop if user */
3641 #if !defined(CONFIG_USER_ONLY)
3642 if (supervisor(dc)) {
3643 ; // XXX
3645 #endif
3646 break;
3647 case 0x13: /* Graphics Status */
3648 if (gen_trap_ifnofpu(dc)) {
3649 goto jmp_insn;
3651 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3652 break;
3653 case 0x14: /* Softint set */
3654 if (!supervisor(dc))
3655 goto illegal_insn;
3656 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3657 gen_helper_set_softint(cpu_env, cpu_tmp0);
3658 break;
3659 case 0x15: /* Softint clear */
3660 if (!supervisor(dc))
3661 goto illegal_insn;
3662 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3663 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3664 break;
3665 case 0x16: /* Softint write */
3666 if (!supervisor(dc))
3667 goto illegal_insn;
3668 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3669 gen_helper_write_softint(cpu_env, cpu_tmp0);
3670 break;
3671 case 0x17: /* Tick compare */
3672 #if !defined(CONFIG_USER_ONLY)
3673 if (!supervisor(dc))
3674 goto illegal_insn;
3675 #endif
3677 TCGv_ptr r_tickptr;
3679 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3680 cpu_src2);
3681 r_tickptr = tcg_temp_new_ptr();
3682 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3683 offsetof(CPUSPARCState, tick));
3684 gen_helper_tick_set_limit(r_tickptr,
3685 cpu_tick_cmpr);
3686 tcg_temp_free_ptr(r_tickptr);
3688 break;
3689 case 0x18: /* System tick */
3690 #if !defined(CONFIG_USER_ONLY)
3691 if (!supervisor(dc))
3692 goto illegal_insn;
3693 #endif
3695 TCGv_ptr r_tickptr;
3697 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3698 cpu_src2);
3699 r_tickptr = tcg_temp_new_ptr();
3700 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3701 offsetof(CPUSPARCState, stick));
3702 gen_helper_tick_set_count(r_tickptr,
3703 cpu_tmp0);
3704 tcg_temp_free_ptr(r_tickptr);
3706 break;
3707 case 0x19: /* System tick compare */
3708 #if !defined(CONFIG_USER_ONLY)
3709 if (!supervisor(dc))
3710 goto illegal_insn;
3711 #endif
3713 TCGv_ptr r_tickptr;
3715 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3716 cpu_src2);
3717 r_tickptr = tcg_temp_new_ptr();
3718 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3719 offsetof(CPUSPARCState, stick));
3720 gen_helper_tick_set_limit(r_tickptr,
3721 cpu_stick_cmpr);
3722 tcg_temp_free_ptr(r_tickptr);
3724 break;
3726 case 0x10: /* Performance Control */
3727 case 0x11: /* Performance Instrumentation
3728 Counter */
3729 case 0x12: /* Dispatch Control */
3730 #endif
3731 default:
3732 goto illegal_insn;
3735 break;
3736 #if !defined(CONFIG_USER_ONLY)
3737 case 0x31: /* wrpsr, V9 saved, restored */
3739 if (!supervisor(dc))
3740 goto priv_insn;
3741 #ifdef TARGET_SPARC64
3742 switch (rd) {
3743 case 0:
3744 gen_helper_saved(cpu_env);
3745 break;
3746 case 1:
3747 gen_helper_restored(cpu_env);
3748 break;
3749 case 2: /* UA2005 allclean */
3750 case 3: /* UA2005 otherw */
3751 case 4: /* UA2005 normalw */
3752 case 5: /* UA2005 invalw */
3753 // XXX
3754 default:
3755 goto illegal_insn;
3757 #else
3758 cpu_tmp0 = get_temp_tl(dc);
3759 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3760 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3761 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3762 dc->cc_op = CC_OP_FLAGS;
3763 save_state(dc);
3764 gen_op_next_insn();
3765 tcg_gen_exit_tb(0);
3766 dc->is_br = 1;
3767 #endif
3769 break;
3770 case 0x32: /* wrwim, V9 wrpr */
3772 if (!supervisor(dc))
3773 goto priv_insn;
3774 cpu_tmp0 = get_temp_tl(dc);
3775 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3776 #ifdef TARGET_SPARC64
3777 switch (rd) {
3778 case 0: // tpc
3780 TCGv_ptr r_tsptr;
3782 r_tsptr = tcg_temp_new_ptr();
3783 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3784 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3785 offsetof(trap_state, tpc));
3786 tcg_temp_free_ptr(r_tsptr);
3788 break;
3789 case 1: // tnpc
3791 TCGv_ptr r_tsptr;
3793 r_tsptr = tcg_temp_new_ptr();
3794 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3795 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3796 offsetof(trap_state, tnpc));
3797 tcg_temp_free_ptr(r_tsptr);
3799 break;
3800 case 2: // tstate
3802 TCGv_ptr r_tsptr;
3804 r_tsptr = tcg_temp_new_ptr();
3805 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3806 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3807 offsetof(trap_state,
3808 tstate));
3809 tcg_temp_free_ptr(r_tsptr);
3811 break;
3812 case 3: // tt
3814 TCGv_ptr r_tsptr;
3816 r_tsptr = tcg_temp_new_ptr();
3817 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3818 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3819 offsetof(trap_state, tt));
3820 tcg_temp_free_ptr(r_tsptr);
3822 break;
3823 case 4: // tick
3825 TCGv_ptr r_tickptr;
3827 r_tickptr = tcg_temp_new_ptr();
3828 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3829 offsetof(CPUSPARCState, tick));
3830 gen_helper_tick_set_count(r_tickptr,
3831 cpu_tmp0);
3832 tcg_temp_free_ptr(r_tickptr);
3834 break;
3835 case 5: // tba
3836 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3837 break;
3838 case 6: // pstate
3839 save_state(dc);
3840 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3841 dc->npc = DYNAMIC_PC;
3842 break;
3843 case 7: // tl
3844 save_state(dc);
3845 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3846 offsetof(CPUSPARCState, tl));
3847 dc->npc = DYNAMIC_PC;
3848 break;
3849 case 8: // pil
3850 gen_helper_wrpil(cpu_env, cpu_tmp0);
3851 break;
3852 case 9: // cwp
3853 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3854 break;
3855 case 10: // cansave
3856 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3857 offsetof(CPUSPARCState,
3858 cansave));
3859 break;
3860 case 11: // canrestore
3861 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3862 offsetof(CPUSPARCState,
3863 canrestore));
3864 break;
3865 case 12: // cleanwin
3866 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3867 offsetof(CPUSPARCState,
3868 cleanwin));
3869 break;
3870 case 13: // otherwin
3871 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3872 offsetof(CPUSPARCState,
3873 otherwin));
3874 break;
3875 case 14: // wstate
3876 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3877 offsetof(CPUSPARCState,
3878 wstate));
3879 break;
3880 case 16: // UA2005 gl
3881 CHECK_IU_FEATURE(dc, GL);
3882 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3883 offsetof(CPUSPARCState, gl));
3884 break;
3885 case 26: // UA2005 strand status
3886 CHECK_IU_FEATURE(dc, HYPV);
3887 if (!hypervisor(dc))
3888 goto priv_insn;
3889 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3890 break;
3891 default:
3892 goto illegal_insn;
3894 #else
3895 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3896 if (dc->def->nwindows != 32) {
3897 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3898 (1 << dc->def->nwindows) - 1);
3900 #endif
3902 break;
3903 case 0x33: /* wrtbr, UA2005 wrhpr */
3905 #ifndef TARGET_SPARC64
3906 if (!supervisor(dc))
3907 goto priv_insn;
3908 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3909 #else
3910 CHECK_IU_FEATURE(dc, HYPV);
3911 if (!hypervisor(dc))
3912 goto priv_insn;
3913 cpu_tmp0 = get_temp_tl(dc);
3914 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3915 switch (rd) {
3916 case 0: // hpstate
3917 // XXX gen_op_wrhpstate();
3918 save_state(dc);
3919 gen_op_next_insn();
3920 tcg_gen_exit_tb(0);
3921 dc->is_br = 1;
3922 break;
3923 case 1: // htstate
3924 // XXX gen_op_wrhtstate();
3925 break;
3926 case 3: // hintp
3927 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3928 break;
3929 case 5: // htba
3930 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3931 break;
3932 case 31: // hstick_cmpr
3934 TCGv_ptr r_tickptr;
3936 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3937 r_tickptr = tcg_temp_new_ptr();
3938 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3939 offsetof(CPUSPARCState, hstick));
3940 gen_helper_tick_set_limit(r_tickptr,
3941 cpu_hstick_cmpr);
3942 tcg_temp_free_ptr(r_tickptr);
3944 break;
3945 case 6: // hver readonly
3946 default:
3947 goto illegal_insn;
3949 #endif
3951 break;
3952 #endif
3953 #ifdef TARGET_SPARC64
3954 case 0x2c: /* V9 movcc */
3956 int cc = GET_FIELD_SP(insn, 11, 12);
3957 int cond = GET_FIELD_SP(insn, 14, 17);
3958 DisasCompare cmp;
3959 TCGv dst;
3961 if (insn & (1 << 18)) {
3962 if (cc == 0) {
3963 gen_compare(&cmp, 0, cond, dc);
3964 } else if (cc == 2) {
3965 gen_compare(&cmp, 1, cond, dc);
3966 } else {
3967 goto illegal_insn;
3969 } else {
3970 gen_fcompare(&cmp, cc, cond);
3973 /* The get_src2 above loaded the normal 13-bit
3974 immediate field, not the 11-bit field we have
3975 in movcc. But it did handle the reg case. */
3976 if (IS_IMM) {
3977 simm = GET_FIELD_SPs(insn, 0, 10);
3978 tcg_gen_movi_tl(cpu_src2, simm);
3981 dst = gen_load_gpr(dc, rd);
3982 tcg_gen_movcond_tl(cmp.cond, dst,
3983 cmp.c1, cmp.c2,
3984 cpu_src2, dst);
3985 free_compare(&cmp);
3986 gen_store_gpr(dc, rd, dst);
3987 break;
3989 case 0x2d: /* V9 sdivx */
3990 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3991 gen_store_gpr(dc, rd, cpu_dst);
3992 break;
3993 case 0x2e: /* V9 popc */
3994 gen_helper_popc(cpu_dst, cpu_src2);
3995 gen_store_gpr(dc, rd, cpu_dst);
3996 break;
3997 case 0x2f: /* V9 movr */
3999 int cond = GET_FIELD_SP(insn, 10, 12);
4000 DisasCompare cmp;
4001 TCGv dst;
4003 gen_compare_reg(&cmp, cond, cpu_src1);
4005 /* The get_src2 above loaded the normal 13-bit
4006 immediate field, not the 10-bit field we have
4007 in movr. But it did handle the reg case. */
4008 if (IS_IMM) {
4009 simm = GET_FIELD_SPs(insn, 0, 9);
4010 tcg_gen_movi_tl(cpu_src2, simm);
4013 dst = gen_load_gpr(dc, rd);
4014 tcg_gen_movcond_tl(cmp.cond, dst,
4015 cmp.c1, cmp.c2,
4016 cpu_src2, dst);
4017 free_compare(&cmp);
4018 gen_store_gpr(dc, rd, dst);
4019 break;
4021 #endif
4022 default:
4023 goto illegal_insn;
4026 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4027 #ifdef TARGET_SPARC64
4028 int opf = GET_FIELD_SP(insn, 5, 13);
4029 rs1 = GET_FIELD(insn, 13, 17);
4030 rs2 = GET_FIELD(insn, 27, 31);
4031 if (gen_trap_ifnofpu(dc)) {
4032 goto jmp_insn;
4035 switch (opf) {
4036 case 0x000: /* VIS I edge8cc */
4037 CHECK_FPU_FEATURE(dc, VIS1);
4038 cpu_src1 = gen_load_gpr(dc, rs1);
4039 cpu_src2 = gen_load_gpr(dc, rs2);
4040 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4041 gen_store_gpr(dc, rd, cpu_dst);
4042 break;
4043 case 0x001: /* VIS II edge8n */
4044 CHECK_FPU_FEATURE(dc, VIS2);
4045 cpu_src1 = gen_load_gpr(dc, rs1);
4046 cpu_src2 = gen_load_gpr(dc, rs2);
4047 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4048 gen_store_gpr(dc, rd, cpu_dst);
4049 break;
4050 case 0x002: /* VIS I edge8lcc */
4051 CHECK_FPU_FEATURE(dc, VIS1);
4052 cpu_src1 = gen_load_gpr(dc, rs1);
4053 cpu_src2 = gen_load_gpr(dc, rs2);
4054 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4055 gen_store_gpr(dc, rd, cpu_dst);
4056 break;
4057 case 0x003: /* VIS II edge8ln */
4058 CHECK_FPU_FEATURE(dc, VIS2);
4059 cpu_src1 = gen_load_gpr(dc, rs1);
4060 cpu_src2 = gen_load_gpr(dc, rs2);
4061 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4062 gen_store_gpr(dc, rd, cpu_dst);
4063 break;
4064 case 0x004: /* VIS I edge16cc */
4065 CHECK_FPU_FEATURE(dc, VIS1);
4066 cpu_src1 = gen_load_gpr(dc, rs1);
4067 cpu_src2 = gen_load_gpr(dc, rs2);
4068 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4069 gen_store_gpr(dc, rd, cpu_dst);
4070 break;
4071 case 0x005: /* VIS II edge16n */
4072 CHECK_FPU_FEATURE(dc, VIS2);
4073 cpu_src1 = gen_load_gpr(dc, rs1);
4074 cpu_src2 = gen_load_gpr(dc, rs2);
4075 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4076 gen_store_gpr(dc, rd, cpu_dst);
4077 break;
4078 case 0x006: /* VIS I edge16lcc */
4079 CHECK_FPU_FEATURE(dc, VIS1);
4080 cpu_src1 = gen_load_gpr(dc, rs1);
4081 cpu_src2 = gen_load_gpr(dc, rs2);
4082 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4083 gen_store_gpr(dc, rd, cpu_dst);
4084 break;
4085 case 0x007: /* VIS II edge16ln */
4086 CHECK_FPU_FEATURE(dc, VIS2);
4087 cpu_src1 = gen_load_gpr(dc, rs1);
4088 cpu_src2 = gen_load_gpr(dc, rs2);
4089 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4090 gen_store_gpr(dc, rd, cpu_dst);
4091 break;
4092 case 0x008: /* VIS I edge32cc */
4093 CHECK_FPU_FEATURE(dc, VIS1);
4094 cpu_src1 = gen_load_gpr(dc, rs1);
4095 cpu_src2 = gen_load_gpr(dc, rs2);
4096 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4097 gen_store_gpr(dc, rd, cpu_dst);
4098 break;
4099 case 0x009: /* VIS II edge32n */
4100 CHECK_FPU_FEATURE(dc, VIS2);
4101 cpu_src1 = gen_load_gpr(dc, rs1);
4102 cpu_src2 = gen_load_gpr(dc, rs2);
4103 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4104 gen_store_gpr(dc, rd, cpu_dst);
4105 break;
4106 case 0x00a: /* VIS I edge32lcc */
4107 CHECK_FPU_FEATURE(dc, VIS1);
4108 cpu_src1 = gen_load_gpr(dc, rs1);
4109 cpu_src2 = gen_load_gpr(dc, rs2);
4110 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4111 gen_store_gpr(dc, rd, cpu_dst);
4112 break;
4113 case 0x00b: /* VIS II edge32ln */
4114 CHECK_FPU_FEATURE(dc, VIS2);
4115 cpu_src1 = gen_load_gpr(dc, rs1);
4116 cpu_src2 = gen_load_gpr(dc, rs2);
4117 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4118 gen_store_gpr(dc, rd, cpu_dst);
4119 break;
4120 case 0x010: /* VIS I array8 */
4121 CHECK_FPU_FEATURE(dc, VIS1);
4122 cpu_src1 = gen_load_gpr(dc, rs1);
4123 cpu_src2 = gen_load_gpr(dc, rs2);
4124 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4125 gen_store_gpr(dc, rd, cpu_dst);
4126 break;
4127 case 0x012: /* VIS I array16 */
4128 CHECK_FPU_FEATURE(dc, VIS1);
4129 cpu_src1 = gen_load_gpr(dc, rs1);
4130 cpu_src2 = gen_load_gpr(dc, rs2);
4131 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4132 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4133 gen_store_gpr(dc, rd, cpu_dst);
4134 break;
4135 case 0x014: /* VIS I array32 */
4136 CHECK_FPU_FEATURE(dc, VIS1);
4137 cpu_src1 = gen_load_gpr(dc, rs1);
4138 cpu_src2 = gen_load_gpr(dc, rs2);
4139 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4140 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4141 gen_store_gpr(dc, rd, cpu_dst);
4142 break;
4143 case 0x018: /* VIS I alignaddr */
4144 CHECK_FPU_FEATURE(dc, VIS1);
4145 cpu_src1 = gen_load_gpr(dc, rs1);
4146 cpu_src2 = gen_load_gpr(dc, rs2);
4147 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4148 gen_store_gpr(dc, rd, cpu_dst);
4149 break;
4150 case 0x01a: /* VIS I alignaddrl */
4151 CHECK_FPU_FEATURE(dc, VIS1);
4152 cpu_src1 = gen_load_gpr(dc, rs1);
4153 cpu_src2 = gen_load_gpr(dc, rs2);
4154 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4155 gen_store_gpr(dc, rd, cpu_dst);
4156 break;
4157 case 0x019: /* VIS II bmask */
4158 CHECK_FPU_FEATURE(dc, VIS2);
4159 cpu_src1 = gen_load_gpr(dc, rs1);
4160 cpu_src2 = gen_load_gpr(dc, rs2);
4161 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4162 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4163 gen_store_gpr(dc, rd, cpu_dst);
4164 break;
4165 case 0x020: /* VIS I fcmple16 */
4166 CHECK_FPU_FEATURE(dc, VIS1);
4167 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4168 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4169 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4170 gen_store_gpr(dc, rd, cpu_dst);
4171 break;
4172 case 0x022: /* VIS I fcmpne16 */
4173 CHECK_FPU_FEATURE(dc, VIS1);
4174 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4175 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4176 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4177 gen_store_gpr(dc, rd, cpu_dst);
4178 break;
4179 case 0x024: /* VIS I fcmple32 */
4180 CHECK_FPU_FEATURE(dc, VIS1);
4181 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4182 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4183 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4184 gen_store_gpr(dc, rd, cpu_dst);
4185 break;
4186 case 0x026: /* VIS I fcmpne32 */
4187 CHECK_FPU_FEATURE(dc, VIS1);
4188 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4189 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4190 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4191 gen_store_gpr(dc, rd, cpu_dst);
4192 break;
4193 case 0x028: /* VIS I fcmpgt16 */
4194 CHECK_FPU_FEATURE(dc, VIS1);
4195 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4196 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4197 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4198 gen_store_gpr(dc, rd, cpu_dst);
4199 break;
4200 case 0x02a: /* VIS I fcmpeq16 */
4201 CHECK_FPU_FEATURE(dc, VIS1);
4202 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4203 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4204 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4205 gen_store_gpr(dc, rd, cpu_dst);
4206 break;
4207 case 0x02c: /* VIS I fcmpgt32 */
4208 CHECK_FPU_FEATURE(dc, VIS1);
4209 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4210 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4211 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4212 gen_store_gpr(dc, rd, cpu_dst);
4213 break;
4214 case 0x02e: /* VIS I fcmpeq32 */
4215 CHECK_FPU_FEATURE(dc, VIS1);
4216 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4217 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4218 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4219 gen_store_gpr(dc, rd, cpu_dst);
4220 break;
4221 case 0x031: /* VIS I fmul8x16 */
4222 CHECK_FPU_FEATURE(dc, VIS1);
4223 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4224 break;
4225 case 0x033: /* VIS I fmul8x16au */
4226 CHECK_FPU_FEATURE(dc, VIS1);
4227 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4228 break;
4229 case 0x035: /* VIS I fmul8x16al */
4230 CHECK_FPU_FEATURE(dc, VIS1);
4231 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4232 break;
4233 case 0x036: /* VIS I fmul8sux16 */
4234 CHECK_FPU_FEATURE(dc, VIS1);
4235 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4236 break;
4237 case 0x037: /* VIS I fmul8ulx16 */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4240 break;
4241 case 0x038: /* VIS I fmuld8sux16 */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4244 break;
4245 case 0x039: /* VIS I fmuld8ulx16 */
4246 CHECK_FPU_FEATURE(dc, VIS1);
4247 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4248 break;
4249 case 0x03a: /* VIS I fpack32 */
4250 CHECK_FPU_FEATURE(dc, VIS1);
4251 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4252 break;
4253 case 0x03b: /* VIS I fpack16 */
4254 CHECK_FPU_FEATURE(dc, VIS1);
4255 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4256 cpu_dst_32 = gen_dest_fpr_F(dc);
4257 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4258 gen_store_fpr_F(dc, rd, cpu_dst_32);
4259 break;
4260 case 0x03d: /* VIS I fpackfix */
4261 CHECK_FPU_FEATURE(dc, VIS1);
4262 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4263 cpu_dst_32 = gen_dest_fpr_F(dc);
4264 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4265 gen_store_fpr_F(dc, rd, cpu_dst_32);
4266 break;
4267 case 0x03e: /* VIS I pdist */
4268 CHECK_FPU_FEATURE(dc, VIS1);
4269 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4270 break;
4271 case 0x048: /* VIS I faligndata */
4272 CHECK_FPU_FEATURE(dc, VIS1);
4273 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4274 break;
4275 case 0x04b: /* VIS I fpmerge */
4276 CHECK_FPU_FEATURE(dc, VIS1);
4277 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4278 break;
4279 case 0x04c: /* VIS II bshuffle */
4280 CHECK_FPU_FEATURE(dc, VIS2);
4281 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4282 break;
4283 case 0x04d: /* VIS I fexpand */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4286 break;
4287 case 0x050: /* VIS I fpadd16 */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4290 break;
4291 case 0x051: /* VIS I fpadd16s */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4294 break;
4295 case 0x052: /* VIS I fpadd32 */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4298 break;
4299 case 0x053: /* VIS I fpadd32s */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4302 break;
4303 case 0x054: /* VIS I fpsub16 */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4306 break;
4307 case 0x055: /* VIS I fpsub16s */
4308 CHECK_FPU_FEATURE(dc, VIS1);
4309 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4310 break;
4311 case 0x056: /* VIS I fpsub32 */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4314 break;
4315 case 0x057: /* VIS I fpsub32s */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4318 break;
4319 case 0x060: /* VIS I fzero */
4320 CHECK_FPU_FEATURE(dc, VIS1);
4321 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4322 tcg_gen_movi_i64(cpu_dst_64, 0);
4323 gen_store_fpr_D(dc, rd, cpu_dst_64);
4324 break;
4325 case 0x061: /* VIS I fzeros */
4326 CHECK_FPU_FEATURE(dc, VIS1);
4327 cpu_dst_32 = gen_dest_fpr_F(dc);
4328 tcg_gen_movi_i32(cpu_dst_32, 0);
4329 gen_store_fpr_F(dc, rd, cpu_dst_32);
4330 break;
4331 case 0x062: /* VIS I fnor */
4332 CHECK_FPU_FEATURE(dc, VIS1);
4333 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4334 break;
4335 case 0x063: /* VIS I fnors */
4336 CHECK_FPU_FEATURE(dc, VIS1);
4337 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4338 break;
4339 case 0x064: /* VIS I fandnot2 */
4340 CHECK_FPU_FEATURE(dc, VIS1);
4341 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4342 break;
4343 case 0x065: /* VIS I fandnot2s */
4344 CHECK_FPU_FEATURE(dc, VIS1);
4345 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4346 break;
4347 case 0x066: /* VIS I fnot2 */
4348 CHECK_FPU_FEATURE(dc, VIS1);
4349 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4350 break;
4351 case 0x067: /* VIS I fnot2s */
4352 CHECK_FPU_FEATURE(dc, VIS1);
4353 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4354 break;
4355 case 0x068: /* VIS I fandnot1 */
4356 CHECK_FPU_FEATURE(dc, VIS1);
4357 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4358 break;
4359 case 0x069: /* VIS I fandnot1s */
4360 CHECK_FPU_FEATURE(dc, VIS1);
4361 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4362 break;
4363 case 0x06a: /* VIS I fnot1 */
4364 CHECK_FPU_FEATURE(dc, VIS1);
4365 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4366 break;
4367 case 0x06b: /* VIS I fnot1s */
4368 CHECK_FPU_FEATURE(dc, VIS1);
4369 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4370 break;
4371 case 0x06c: /* VIS I fxor */
4372 CHECK_FPU_FEATURE(dc, VIS1);
4373 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4374 break;
4375 case 0x06d: /* VIS I fxors */
4376 CHECK_FPU_FEATURE(dc, VIS1);
4377 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4378 break;
4379 case 0x06e: /* VIS I fnand */
4380 CHECK_FPU_FEATURE(dc, VIS1);
4381 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4382 break;
4383 case 0x06f: /* VIS I fnands */
4384 CHECK_FPU_FEATURE(dc, VIS1);
4385 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4386 break;
4387 case 0x070: /* VIS I fand */
4388 CHECK_FPU_FEATURE(dc, VIS1);
4389 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4390 break;
4391 case 0x071: /* VIS I fands */
4392 CHECK_FPU_FEATURE(dc, VIS1);
4393 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4394 break;
4395 case 0x072: /* VIS I fxnor */
4396 CHECK_FPU_FEATURE(dc, VIS1);
4397 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4398 break;
4399 case 0x073: /* VIS I fxnors */
4400 CHECK_FPU_FEATURE(dc, VIS1);
4401 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4402 break;
4403 case 0x074: /* VIS I fsrc1 */
4404 CHECK_FPU_FEATURE(dc, VIS1);
4405 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4406 gen_store_fpr_D(dc, rd, cpu_src1_64);
4407 break;
4408 case 0x075: /* VIS I fsrc1s */
4409 CHECK_FPU_FEATURE(dc, VIS1);
4410 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4411 gen_store_fpr_F(dc, rd, cpu_src1_32);
4412 break;
4413 case 0x076: /* VIS I fornot2 */
4414 CHECK_FPU_FEATURE(dc, VIS1);
4415 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4416 break;
4417 case 0x077: /* VIS I fornot2s */
4418 CHECK_FPU_FEATURE(dc, VIS1);
4419 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4420 break;
4421 case 0x078: /* VIS I fsrc2 */
4422 CHECK_FPU_FEATURE(dc, VIS1);
4423 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4424 gen_store_fpr_D(dc, rd, cpu_src1_64);
4425 break;
4426 case 0x079: /* VIS I fsrc2s */
4427 CHECK_FPU_FEATURE(dc, VIS1);
4428 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4429 gen_store_fpr_F(dc, rd, cpu_src1_32);
4430 break;
4431 case 0x07a: /* VIS I fornot1 */
4432 CHECK_FPU_FEATURE(dc, VIS1);
4433 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4434 break;
4435 case 0x07b: /* VIS I fornot1s */
4436 CHECK_FPU_FEATURE(dc, VIS1);
4437 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4438 break;
4439 case 0x07c: /* VIS I for */
4440 CHECK_FPU_FEATURE(dc, VIS1);
4441 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4442 break;
4443 case 0x07d: /* VIS I fors */
4444 CHECK_FPU_FEATURE(dc, VIS1);
4445 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4446 break;
4447 case 0x07e: /* VIS I fone */
4448 CHECK_FPU_FEATURE(dc, VIS1);
4449 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4450 tcg_gen_movi_i64(cpu_dst_64, -1);
4451 gen_store_fpr_D(dc, rd, cpu_dst_64);
4452 break;
4453 case 0x07f: /* VIS I fones */
4454 CHECK_FPU_FEATURE(dc, VIS1);
4455 cpu_dst_32 = gen_dest_fpr_F(dc);
4456 tcg_gen_movi_i32(cpu_dst_32, -1);
4457 gen_store_fpr_F(dc, rd, cpu_dst_32);
4458 break;
4459 case 0x080: /* VIS I shutdown */
4460 case 0x081: /* VIS II siam */
4461 // XXX
4462 goto illegal_insn;
4463 default:
4464 goto illegal_insn;
4466 #else
4467 goto ncp_insn;
4468 #endif
4469 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4470 #ifdef TARGET_SPARC64
4471 goto illegal_insn;
4472 #else
4473 goto ncp_insn;
4474 #endif
4475 #ifdef TARGET_SPARC64
4476 } else if (xop == 0x39) { /* V9 return */
4477 TCGv_i32 r_const;
4479 save_state(dc);
4480 cpu_src1 = get_src1(dc, insn);
4481 cpu_tmp0 = get_temp_tl(dc);
4482 if (IS_IMM) { /* immediate */
4483 simm = GET_FIELDs(insn, 19, 31);
4484 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4485 } else { /* register */
4486 rs2 = GET_FIELD(insn, 27, 31);
4487 if (rs2) {
4488 cpu_src2 = gen_load_gpr(dc, rs2);
4489 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4490 } else {
4491 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4494 gen_helper_restore(cpu_env);
4495 gen_mov_pc_npc(dc);
4496 r_const = tcg_const_i32(3);
4497 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4498 tcg_temp_free_i32(r_const);
4499 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4500 dc->npc = DYNAMIC_PC;
4501 goto jmp_insn;
4502 #endif
4503 } else {
4504 cpu_src1 = get_src1(dc, insn);
4505 cpu_tmp0 = get_temp_tl(dc);
4506 if (IS_IMM) { /* immediate */
4507 simm = GET_FIELDs(insn, 19, 31);
4508 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4509 } else { /* register */
4510 rs2 = GET_FIELD(insn, 27, 31);
4511 if (rs2) {
4512 cpu_src2 = gen_load_gpr(dc, rs2);
4513 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4514 } else {
4515 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4518 switch (xop) {
4519 case 0x38: /* jmpl */
4521 TCGv t;
4522 TCGv_i32 r_const;
4524 t = gen_dest_gpr(dc, rd);
4525 tcg_gen_movi_tl(t, dc->pc);
4526 gen_store_gpr(dc, rd, t);
4527 gen_mov_pc_npc(dc);
4528 r_const = tcg_const_i32(3);
4529 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4530 tcg_temp_free_i32(r_const);
4531 gen_address_mask(dc, cpu_tmp0);
4532 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4533 dc->npc = DYNAMIC_PC;
4535 goto jmp_insn;
4536 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4537 case 0x39: /* rett, V9 return */
4539 TCGv_i32 r_const;
4541 if (!supervisor(dc))
4542 goto priv_insn;
4543 gen_mov_pc_npc(dc);
4544 r_const = tcg_const_i32(3);
4545 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4546 tcg_temp_free_i32(r_const);
4547 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4548 dc->npc = DYNAMIC_PC;
4549 gen_helper_rett(cpu_env);
4551 goto jmp_insn;
4552 #endif
4553 case 0x3b: /* flush */
4554 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4555 goto unimp_flush;
4556 /* nop */
4557 break;
4558 case 0x3c: /* save */
4559 save_state(dc);
4560 gen_helper_save(cpu_env);
4561 gen_store_gpr(dc, rd, cpu_tmp0);
4562 break;
4563 case 0x3d: /* restore */
4564 save_state(dc);
4565 gen_helper_restore(cpu_env);
4566 gen_store_gpr(dc, rd, cpu_tmp0);
4567 break;
4568 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4569 case 0x3e: /* V9 done/retry */
4571 switch (rd) {
4572 case 0:
4573 if (!supervisor(dc))
4574 goto priv_insn;
4575 dc->npc = DYNAMIC_PC;
4576 dc->pc = DYNAMIC_PC;
4577 gen_helper_done(cpu_env);
4578 goto jmp_insn;
4579 case 1:
4580 if (!supervisor(dc))
4581 goto priv_insn;
4582 dc->npc = DYNAMIC_PC;
4583 dc->pc = DYNAMIC_PC;
4584 gen_helper_retry(cpu_env);
4585 goto jmp_insn;
4586 default:
4587 goto illegal_insn;
4590 break;
4591 #endif
4592 default:
4593 goto illegal_insn;
4596 break;
4598 break;
4599 case 3: /* load/store instructions */
4601 unsigned int xop = GET_FIELD(insn, 7, 12);
4602 /* ??? gen_address_mask prevents us from using a source
4603 register directly. Always generate a temporary. */
4604 TCGv cpu_addr = get_temp_tl(dc);
4606 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4607 if (xop == 0x3c || xop == 0x3e) {
4608 /* V9 casa/casxa : no offset */
4609 } else if (IS_IMM) { /* immediate */
4610 simm = GET_FIELDs(insn, 19, 31);
4611 if (simm != 0) {
4612 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4614 } else { /* register */
4615 rs2 = GET_FIELD(insn, 27, 31);
4616 if (rs2 != 0) {
4617 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4620 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4621 (xop > 0x17 && xop <= 0x1d ) ||
4622 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4623 TCGv cpu_val = gen_dest_gpr(dc, rd);
4625 switch (xop) {
4626 case 0x0: /* ld, V9 lduw, load unsigned word */
4627 gen_address_mask(dc, cpu_addr);
4628 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4629 break;
4630 case 0x1: /* ldub, load unsigned byte */
4631 gen_address_mask(dc, cpu_addr);
4632 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4633 break;
4634 case 0x2: /* lduh, load unsigned halfword */
4635 gen_address_mask(dc, cpu_addr);
4636 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4637 break;
4638 case 0x3: /* ldd, load double word */
4639 if (rd & 1)
4640 goto illegal_insn;
4641 else {
4642 TCGv_i32 r_const;
4643 TCGv_i64 t64;
4645 save_state(dc);
4646 r_const = tcg_const_i32(7);
4647 /* XXX remove alignment check */
4648 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4649 tcg_temp_free_i32(r_const);
4650 gen_address_mask(dc, cpu_addr);
4651 t64 = tcg_temp_new_i64();
4652 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4653 tcg_gen_trunc_i64_tl(cpu_val, t64);
4654 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4655 gen_store_gpr(dc, rd + 1, cpu_val);
4656 tcg_gen_shri_i64(t64, t64, 32);
4657 tcg_gen_trunc_i64_tl(cpu_val, t64);
4658 tcg_temp_free_i64(t64);
4659 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4661 break;
4662 case 0x9: /* ldsb, load signed byte */
4663 gen_address_mask(dc, cpu_addr);
4664 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4665 break;
4666 case 0xa: /* ldsh, load signed halfword */
4667 gen_address_mask(dc, cpu_addr);
4668 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4669 break;
4670 case 0xd: /* ldstub -- XXX: should be atomically */
4672 TCGv r_const;
4674 gen_address_mask(dc, cpu_addr);
4675 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4676 r_const = tcg_const_tl(0xff);
4677 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4678 tcg_temp_free(r_const);
4680 break;
4681 case 0x0f:
4682 /* swap, swap register with memory. Also atomically */
4684 TCGv t0 = get_temp_tl(dc);
4685 CHECK_IU_FEATURE(dc, SWAP);
4686 cpu_src1 = gen_load_gpr(dc, rd);
4687 gen_address_mask(dc, cpu_addr);
4688 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4689 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4690 tcg_gen_mov_tl(cpu_val, t0);
4692 break;
4693 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4694 case 0x10: /* lda, V9 lduwa, load word alternate */
4695 #ifndef TARGET_SPARC64
4696 if (IS_IMM)
4697 goto illegal_insn;
4698 if (!supervisor(dc))
4699 goto priv_insn;
4700 #endif
4701 save_state(dc);
4702 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4703 break;
4704 case 0x11: /* lduba, load unsigned byte alternate */
4705 #ifndef TARGET_SPARC64
4706 if (IS_IMM)
4707 goto illegal_insn;
4708 if (!supervisor(dc))
4709 goto priv_insn;
4710 #endif
4711 save_state(dc);
4712 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4713 break;
4714 case 0x12: /* lduha, load unsigned halfword alternate */
4715 #ifndef TARGET_SPARC64
4716 if (IS_IMM)
4717 goto illegal_insn;
4718 if (!supervisor(dc))
4719 goto priv_insn;
4720 #endif
4721 save_state(dc);
4722 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4723 break;
4724 case 0x13: /* ldda, load double word alternate */
4725 #ifndef TARGET_SPARC64
4726 if (IS_IMM)
4727 goto illegal_insn;
4728 if (!supervisor(dc))
4729 goto priv_insn;
4730 #endif
4731 if (rd & 1)
4732 goto illegal_insn;
4733 save_state(dc);
4734 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4735 goto skip_move;
4736 case 0x19: /* ldsba, load signed byte alternate */
4737 #ifndef TARGET_SPARC64
4738 if (IS_IMM)
4739 goto illegal_insn;
4740 if (!supervisor(dc))
4741 goto priv_insn;
4742 #endif
4743 save_state(dc);
4744 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4745 break;
4746 case 0x1a: /* ldsha, load signed halfword alternate */
4747 #ifndef TARGET_SPARC64
4748 if (IS_IMM)
4749 goto illegal_insn;
4750 if (!supervisor(dc))
4751 goto priv_insn;
4752 #endif
4753 save_state(dc);
4754 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4755 break;
4756 case 0x1d: /* ldstuba -- XXX: should be atomically */
4757 #ifndef TARGET_SPARC64
4758 if (IS_IMM)
4759 goto illegal_insn;
4760 if (!supervisor(dc))
4761 goto priv_insn;
4762 #endif
4763 save_state(dc);
4764 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4765 break;
4766 case 0x1f: /* swapa, swap reg with alt. memory. Also
4767 atomically */
4768 CHECK_IU_FEATURE(dc, SWAP);
4769 #ifndef TARGET_SPARC64
4770 if (IS_IMM)
4771 goto illegal_insn;
4772 if (!supervisor(dc))
4773 goto priv_insn;
4774 #endif
4775 save_state(dc);
4776 cpu_src1 = gen_load_gpr(dc, rd);
4777 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4778 break;
4780 #ifndef TARGET_SPARC64
4781 case 0x30: /* ldc */
4782 case 0x31: /* ldcsr */
4783 case 0x33: /* lddc */
4784 goto ncp_insn;
4785 #endif
4786 #endif
4787 #ifdef TARGET_SPARC64
4788 case 0x08: /* V9 ldsw */
4789 gen_address_mask(dc, cpu_addr);
4790 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4791 break;
4792 case 0x0b: /* V9 ldx */
4793 gen_address_mask(dc, cpu_addr);
4794 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4795 break;
4796 case 0x18: /* V9 ldswa */
4797 save_state(dc);
4798 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4799 break;
4800 case 0x1b: /* V9 ldxa */
4801 save_state(dc);
4802 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4803 break;
4804 case 0x2d: /* V9 prefetch, no effect */
4805 goto skip_move;
4806 case 0x30: /* V9 ldfa */
4807 if (gen_trap_ifnofpu(dc)) {
4808 goto jmp_insn;
4810 save_state(dc);
4811 gen_ldf_asi(cpu_addr, insn, 4, rd);
4812 gen_update_fprs_dirty(rd);
4813 goto skip_move;
4814 case 0x33: /* V9 lddfa */
4815 if (gen_trap_ifnofpu(dc)) {
4816 goto jmp_insn;
4818 save_state(dc);
4819 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4820 gen_update_fprs_dirty(DFPREG(rd));
4821 goto skip_move;
4822 case 0x3d: /* V9 prefetcha, no effect */
4823 goto skip_move;
4824 case 0x32: /* V9 ldqfa */
4825 CHECK_FPU_FEATURE(dc, FLOAT128);
4826 if (gen_trap_ifnofpu(dc)) {
4827 goto jmp_insn;
4829 save_state(dc);
4830 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4831 gen_update_fprs_dirty(QFPREG(rd));
4832 goto skip_move;
4833 #endif
4834 default:
4835 goto illegal_insn;
4837 gen_store_gpr(dc, rd, cpu_val);
4838 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4839 skip_move: ;
4840 #endif
4841 } else if (xop >= 0x20 && xop < 0x24) {
4842 TCGv t0;
4844 if (gen_trap_ifnofpu(dc)) {
4845 goto jmp_insn;
4847 save_state(dc);
4848 switch (xop) {
4849 case 0x20: /* ldf, load fpreg */
4850 gen_address_mask(dc, cpu_addr);
4851 t0 = get_temp_tl(dc);
4852 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4853 cpu_dst_32 = gen_dest_fpr_F(dc);
4854 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4855 gen_store_fpr_F(dc, rd, cpu_dst_32);
4856 break;
4857 case 0x21: /* ldfsr, V9 ldxfsr */
4858 #ifdef TARGET_SPARC64
4859 gen_address_mask(dc, cpu_addr);
4860 if (rd == 1) {
4861 TCGv_i64 t64 = tcg_temp_new_i64();
4862 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4863 gen_helper_ldxfsr(cpu_env, t64);
4864 tcg_temp_free_i64(t64);
4865 break;
4867 #endif
4868 cpu_dst_32 = get_temp_i32(dc);
4869 t0 = get_temp_tl(dc);
4870 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4871 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4872 gen_helper_ldfsr(cpu_env, cpu_dst_32);
4873 break;
4874 case 0x22: /* ldqf, load quad fpreg */
4876 TCGv_i32 r_const;
4878 CHECK_FPU_FEATURE(dc, FLOAT128);
4879 r_const = tcg_const_i32(dc->mem_idx);
4880 gen_address_mask(dc, cpu_addr);
4881 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4882 tcg_temp_free_i32(r_const);
4883 gen_op_store_QT0_fpr(QFPREG(rd));
4884 gen_update_fprs_dirty(QFPREG(rd));
4886 break;
4887 case 0x23: /* lddf, load double fpreg */
4888 gen_address_mask(dc, cpu_addr);
4889 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4890 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4891 gen_store_fpr_D(dc, rd, cpu_dst_64);
4892 break;
4893 default:
4894 goto illegal_insn;
4896 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4897 xop == 0xe || xop == 0x1e) {
4898 TCGv cpu_val = gen_load_gpr(dc, rd);
4900 switch (xop) {
4901 case 0x4: /* st, store word */
4902 gen_address_mask(dc, cpu_addr);
4903 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4904 break;
4905 case 0x5: /* stb, store byte */
4906 gen_address_mask(dc, cpu_addr);
4907 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4908 break;
4909 case 0x6: /* sth, store halfword */
4910 gen_address_mask(dc, cpu_addr);
4911 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4912 break;
4913 case 0x7: /* std, store double word */
4914 if (rd & 1)
4915 goto illegal_insn;
4916 else {
4917 TCGv_i32 r_const;
4918 TCGv_i64 t64;
4919 TCGv lo;
4921 save_state(dc);
4922 gen_address_mask(dc, cpu_addr);
4923 r_const = tcg_const_i32(7);
4924 /* XXX remove alignment check */
4925 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4926 tcg_temp_free_i32(r_const);
4927 lo = gen_load_gpr(dc, rd + 1);
4929 t64 = tcg_temp_new_i64();
4930 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4931 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4932 tcg_temp_free_i64(t64);
4934 break;
4935 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4936 case 0x14: /* sta, V9 stwa, store word alternate */
4937 #ifndef TARGET_SPARC64
4938 if (IS_IMM)
4939 goto illegal_insn;
4940 if (!supervisor(dc))
4941 goto priv_insn;
4942 #endif
4943 save_state(dc);
4944 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4945 dc->npc = DYNAMIC_PC;
4946 break;
4947 case 0x15: /* stba, store byte alternate */
4948 #ifndef TARGET_SPARC64
4949 if (IS_IMM)
4950 goto illegal_insn;
4951 if (!supervisor(dc))
4952 goto priv_insn;
4953 #endif
4954 save_state(dc);
4955 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4956 dc->npc = DYNAMIC_PC;
4957 break;
4958 case 0x16: /* stha, store halfword alternate */
4959 #ifndef TARGET_SPARC64
4960 if (IS_IMM)
4961 goto illegal_insn;
4962 if (!supervisor(dc))
4963 goto priv_insn;
4964 #endif
4965 save_state(dc);
4966 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4967 dc->npc = DYNAMIC_PC;
4968 break;
4969 case 0x17: /* stda, store double word alternate */
4970 #ifndef TARGET_SPARC64
4971 if (IS_IMM)
4972 goto illegal_insn;
4973 if (!supervisor(dc))
4974 goto priv_insn;
4975 #endif
4976 if (rd & 1)
4977 goto illegal_insn;
4978 else {
4979 save_state(dc);
4980 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4982 break;
4983 #endif
4984 #ifdef TARGET_SPARC64
4985 case 0x0e: /* V9 stx */
4986 gen_address_mask(dc, cpu_addr);
4987 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4988 break;
4989 case 0x1e: /* V9 stxa */
4990 save_state(dc);
4991 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4992 dc->npc = DYNAMIC_PC;
4993 break;
4994 #endif
4995 default:
4996 goto illegal_insn;
4998 } else if (xop > 0x23 && xop < 0x28) {
4999 if (gen_trap_ifnofpu(dc)) {
5000 goto jmp_insn;
5002 save_state(dc);
5003 switch (xop) {
5004 case 0x24: /* stf, store fpreg */
5006 TCGv t = get_temp_tl(dc);
5007 gen_address_mask(dc, cpu_addr);
5008 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5009 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5010 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5012 break;
5013 case 0x25: /* stfsr, V9 stxfsr */
5015 TCGv t = get_temp_tl(dc);
5017 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5018 #ifdef TARGET_SPARC64
5019 gen_address_mask(dc, cpu_addr);
5020 if (rd == 1) {
5021 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5022 break;
5024 #endif
5025 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5027 break;
5028 case 0x26:
5029 #ifdef TARGET_SPARC64
5030 /* V9 stqf, store quad fpreg */
5032 TCGv_i32 r_const;
5034 CHECK_FPU_FEATURE(dc, FLOAT128);
5035 gen_op_load_fpr_QT0(QFPREG(rd));
5036 r_const = tcg_const_i32(dc->mem_idx);
5037 gen_address_mask(dc, cpu_addr);
5038 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5039 tcg_temp_free_i32(r_const);
5041 break;
5042 #else /* !TARGET_SPARC64 */
5043 /* stdfq, store floating point queue */
5044 #if defined(CONFIG_USER_ONLY)
5045 goto illegal_insn;
5046 #else
5047 if (!supervisor(dc))
5048 goto priv_insn;
5049 if (gen_trap_ifnofpu(dc)) {
5050 goto jmp_insn;
5052 goto nfq_insn;
5053 #endif
5054 #endif
5055 case 0x27: /* stdf, store double fpreg */
5056 gen_address_mask(dc, cpu_addr);
5057 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5058 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5059 break;
5060 default:
5061 goto illegal_insn;
5063 } else if (xop > 0x33 && xop < 0x3f) {
5064 save_state(dc);
5065 switch (xop) {
5066 #ifdef TARGET_SPARC64
5067 case 0x34: /* V9 stfa */
5068 if (gen_trap_ifnofpu(dc)) {
5069 goto jmp_insn;
5071 gen_stf_asi(cpu_addr, insn, 4, rd);
5072 break;
5073 case 0x36: /* V9 stqfa */
5075 TCGv_i32 r_const;
5077 CHECK_FPU_FEATURE(dc, FLOAT128);
5078 if (gen_trap_ifnofpu(dc)) {
5079 goto jmp_insn;
5081 r_const = tcg_const_i32(7);
5082 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5083 tcg_temp_free_i32(r_const);
5084 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5086 break;
5087 case 0x37: /* V9 stdfa */
5088 if (gen_trap_ifnofpu(dc)) {
5089 goto jmp_insn;
5091 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5092 break;
5093 case 0x3e: /* V9 casxa */
5094 rs2 = GET_FIELD(insn, 27, 31);
5095 cpu_src2 = gen_load_gpr(dc, rs2);
5096 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5097 break;
5098 #else
5099 case 0x34: /* stc */
5100 case 0x35: /* stcsr */
5101 case 0x36: /* stdcq */
5102 case 0x37: /* stdc */
5103 goto ncp_insn;
5104 #endif
5105 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5106 case 0x3c: /* V9 or LEON3 casa */
5107 #ifndef TARGET_SPARC64
5108 CHECK_IU_FEATURE(dc, CASA);
5109 if (IS_IMM) {
5110 goto illegal_insn;
5112 /* LEON3 allows CASA from user space with ASI 0xa */
5113 if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
5114 goto priv_insn;
5116 #endif
5117 rs2 = GET_FIELD(insn, 27, 31);
5118 cpu_src2 = gen_load_gpr(dc, rs2);
5119 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5120 break;
5121 #endif
5122 default:
5123 goto illegal_insn;
5125 } else {
5126 goto illegal_insn;
5129 break;
5131 /* default case for non jump instructions */
5132 if (dc->npc == DYNAMIC_PC) {
5133 dc->pc = DYNAMIC_PC;
5134 gen_op_next_insn();
5135 } else if (dc->npc == JUMP_PC) {
5136 /* we can do a static jump */
5137 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5138 dc->is_br = 1;
5139 } else {
5140 dc->pc = dc->npc;
5141 dc->npc = dc->npc + 4;
5143 jmp_insn:
5144 goto egress;
5145 illegal_insn:
5147 TCGv_i32 r_const;
5149 save_state(dc);
5150 r_const = tcg_const_i32(TT_ILL_INSN);
5151 gen_helper_raise_exception(cpu_env, r_const);
5152 tcg_temp_free_i32(r_const);
5153 dc->is_br = 1;
5155 goto egress;
5156 unimp_flush:
5158 TCGv_i32 r_const;
5160 save_state(dc);
5161 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5162 gen_helper_raise_exception(cpu_env, r_const);
5163 tcg_temp_free_i32(r_const);
5164 dc->is_br = 1;
5166 goto egress;
5167 #if !defined(CONFIG_USER_ONLY)
5168 priv_insn:
5170 TCGv_i32 r_const;
5172 save_state(dc);
5173 r_const = tcg_const_i32(TT_PRIV_INSN);
5174 gen_helper_raise_exception(cpu_env, r_const);
5175 tcg_temp_free_i32(r_const);
5176 dc->is_br = 1;
5178 goto egress;
5179 #endif
5180 nfpu_insn:
5181 save_state(dc);
5182 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5183 dc->is_br = 1;
5184 goto egress;
5185 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5186 nfq_insn:
5187 save_state(dc);
5188 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5189 dc->is_br = 1;
5190 goto egress;
5191 #endif
5192 #ifndef TARGET_SPARC64
5193 ncp_insn:
5195 TCGv r_const;
5197 save_state(dc);
5198 r_const = tcg_const_i32(TT_NCP_INSN);
5199 gen_helper_raise_exception(cpu_env, r_const);
5200 tcg_temp_free(r_const);
5201 dc->is_br = 1;
5203 goto egress;
5204 #endif
5205 egress:
5206 if (dc->n_t32 != 0) {
5207 int i;
5208 for (i = dc->n_t32 - 1; i >= 0; --i) {
5209 tcg_temp_free_i32(dc->t32[i]);
5211 dc->n_t32 = 0;
5213 if (dc->n_ttl != 0) {
5214 int i;
5215 for (i = dc->n_ttl - 1; i >= 0; --i) {
5216 tcg_temp_free(dc->ttl[i]);
5218 dc->n_ttl = 0;
5222 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5224 SPARCCPU *cpu = sparc_env_get_cpu(env);
5225 CPUState *cs = CPU(cpu);
5226 target_ulong pc_start, last_pc;
5227 DisasContext dc1, *dc = &dc1;
5228 int num_insns;
5229 int max_insns;
5230 unsigned int insn;
5232 memset(dc, 0, sizeof(DisasContext));
5233 dc->tb = tb;
5234 pc_start = tb->pc;
5235 dc->pc = pc_start;
5236 last_pc = dc->pc;
5237 dc->npc = (target_ulong) tb->cs_base;
5238 dc->cc_op = CC_OP_DYNAMIC;
5239 dc->mem_idx = cpu_mmu_index(env, false);
5240 dc->def = env->def;
5241 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5242 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5243 dc->singlestep = (cs->singlestep_enabled || singlestep);
5245 num_insns = 0;
5246 max_insns = tb->cflags & CF_COUNT_MASK;
5247 if (max_insns == 0) {
5248 max_insns = CF_COUNT_MASK;
5250 if (max_insns > TCG_MAX_INSNS) {
5251 max_insns = TCG_MAX_INSNS;
5254 gen_tb_start(tb);
5255 do {
5256 if (dc->npc & JUMP_PC) {
5257 assert(dc->jump_pc[1] == dc->pc + 4);
5258 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5259 } else {
5260 tcg_gen_insn_start(dc->pc, dc->npc);
5262 num_insns++;
5263 last_pc = dc->pc;
5265 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5266 if (dc->pc != pc_start) {
5267 save_state(dc);
5269 gen_helper_debug(cpu_env);
5270 tcg_gen_exit_tb(0);
5271 dc->is_br = 1;
5272 goto exit_gen_loop;
5275 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5276 gen_io_start();
5279 insn = cpu_ldl_code(env, dc->pc);
5281 disas_sparc_insn(dc, insn);
5283 if (dc->is_br)
5284 break;
5285 /* if the next PC is different, we abort now */
5286 if (dc->pc != (last_pc + 4))
5287 break;
5288 /* if we reach a page boundary, we stop generation so that the
5289 PC of a TT_TFAULT exception is always in the right page */
5290 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5291 break;
5292 /* if single step mode, we generate only one instruction and
5293 generate an exception */
5294 if (dc->singlestep) {
5295 break;
5297 } while (!tcg_op_buf_full() &&
5298 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5299 num_insns < max_insns);
5301 exit_gen_loop:
5302 if (tb->cflags & CF_LAST_IO) {
5303 gen_io_end();
5305 if (!dc->is_br) {
5306 if (dc->pc != DYNAMIC_PC &&
5307 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5308 /* static PC and NPC: we can use direct chaining */
5309 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5310 } else {
5311 if (dc->pc != DYNAMIC_PC) {
5312 tcg_gen_movi_tl(cpu_pc, dc->pc);
5314 save_npc(dc);
5315 tcg_gen_exit_tb(0);
5318 gen_tb_end(tb, num_insns);
5320 tb->size = last_pc + 4 - pc_start;
5321 tb->icount = num_insns;
5323 #ifdef DEBUG_DISAS
5324 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5325 qemu_log("--------------\n");
5326 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5327 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5328 qemu_log("\n");
5330 #endif
5333 void gen_intermediate_code_init(CPUSPARCState *env)
5335 unsigned int i;
5336 static int inited;
5337 static const char * const gregnames[8] = {
5338 NULL, // g0 not used
5339 "g1",
5340 "g2",
5341 "g3",
5342 "g4",
5343 "g5",
5344 "g6",
5345 "g7",
5347 static const char * const fregnames[32] = {
5348 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5349 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5350 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5351 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5354 /* init various static tables */
5355 if (!inited) {
5356 inited = 1;
5358 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5359 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5360 offsetof(CPUSPARCState, regwptr),
5361 "regwptr");
5362 #ifdef TARGET_SPARC64
5363 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5364 "xcc");
5365 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5366 "asi");
5367 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5368 "fprs");
5369 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5370 "gsr");
5371 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5372 offsetof(CPUSPARCState, tick_cmpr),
5373 "tick_cmpr");
5374 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5375 offsetof(CPUSPARCState, stick_cmpr),
5376 "stick_cmpr");
5377 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5378 offsetof(CPUSPARCState, hstick_cmpr),
5379 "hstick_cmpr");
5380 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5381 "hintp");
5382 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5383 "htba");
5384 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5385 "hver");
5386 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5387 offsetof(CPUSPARCState, ssr), "ssr");
5388 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5389 offsetof(CPUSPARCState, version), "ver");
5390 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5391 offsetof(CPUSPARCState, softint),
5392 "softint");
5393 #else
5394 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5395 "wim");
5396 #endif
5397 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5398 "cond");
5399 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5400 "cc_src");
5401 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5402 offsetof(CPUSPARCState, cc_src2),
5403 "cc_src2");
5404 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5405 "cc_dst");
5406 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5407 "cc_op");
5408 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5409 "psr");
5410 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5411 "fsr");
5412 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5413 "pc");
5414 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5415 "npc");
5416 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5417 #ifndef CONFIG_USER_ONLY
5418 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5419 "tbr");
5420 #endif
5421 for (i = 1; i < 8; i++) {
5422 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5423 offsetof(CPUSPARCState, gregs[i]),
5424 gregnames[i]);
5426 for (i = 0; i < TARGET_DPREGS; i++) {
5427 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5428 offsetof(CPUSPARCState, fpr[i]),
5429 fregnames[i]);
5434 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5435 target_ulong *data)
5437 target_ulong pc = data[0];
5438 target_ulong npc = data[1];
5440 env->pc = pc;
5441 if (npc == DYNAMIC_PC) {
5442 /* dynamic NPC: already stored */
5443 } else if (npc & JUMP_PC) {
5444 /* jump PC: use 'cond' and the jump targets of the translation */
5445 if (env->cond) {
5446 env->npc = npc & ~3;
5447 } else {
5448 env->npc = pc + 4;
5450 } else {
5451 env->npc = npc;