/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;
typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
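/* Example of the two numbering schemes: for a format-3 instruction the
   destination register is rd = GET_FIELD(insn, 2, 6), i.e. instruction
   bits 29..25; GET_FIELD_SP(insn, 25, 29) names the same field using
   the manual's little-endian bit order. */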
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
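/* Temporaries handed out by the two helpers above are recorded in the
   DisasContext so the translator can free them in bulk once the current
   instruction has been emitted; the array sizes bound how many such
   temps a single instruction may use. */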
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
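/* In the V9 FPRS register, bit 0 (DL) marks the lower half of the FP
   register file dirty and bit 1 (DU) the upper half, hence the constants
   1 and 2 above for rd < 32 and rd >= 32 respectively. */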
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
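/* With the V9 PSTATE.AM (address mask) bit set, generated addresses are
   truncated to 32 bits; AM_CHECK reflects that state (and is constant 1
   for the 32-bit ABI), so the mask above is a no-op for pure 64-bit code. */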
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
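/* Condition codes are evaluated lazily: arithmetic ops such as the one
   above only record their operands and result in cpu_cc_src, cpu_cc_src2
   and cpu_cc_dst together with a CC_OP_* tag, and the real PSR flags are
   materialized on demand by gen_helper_compute_psr() (see update_psr
   below). */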
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src) */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2) */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain ADD. */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place. */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain SUB. */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place. */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
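/* A note on the above: MULScc performs one step of the V8 iterative
   multiply -- shift Y right by one while capturing the low bit of the
   partial product, shift (N ^ V) into the top of cpu_cc_src, and add in
   the (possibly zeroed) multiplicand, updating the condition codes. */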
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
   FPSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
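/* Each helper above evaluates one FBfcc condition from the selected
   2-bit FCC field (0: equal, 1: less, 2: greater, 3: unordered), with
   fcc_offset choosing which of the four FCC fields is examined. */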
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
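/* A note on the two helpers above: gen_branch_a implements an annulled
   conditional branch, where the delay slot at npc is executed only when
   the branch is taken; gen_branch_n handles the non-annulled case and
   defers the next-PC choice via jump_pc[]/JUMP_PC when the target is
   static. */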
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended. */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result. */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }
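    /* The offsets above are relative to FCC0: fcc0 sits at FSR bit 10
       and fcc1/fcc2/fcc3 at bits 32/34/36, hence the "32 - 10" style
       constants passed down to the gen_mov_reg_FCC* helpers. */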
    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst. */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
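/* A reading aid for the wrappers below: F, D and Q in the suffix stand
   for single, double and quad operands (so gen_fop_DFF emits a helper
   taking two singles and producing a double), while the gen_ne_*
   variants wrap operations that do not modify the IEEE exception state. */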
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
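/* The ASI for these accesses comes either from the %asi register (when
   the i bit selects the rs1 + simm13 addressing form) or from an 8-bit
   immediate field in the instruction; either way the helpers below
   receive it boxed in a TCGv_i32. */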
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}

#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
2319 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2320 int width, bool cc, bool left)
2322 TCGv lo1, lo2, t1, t2;
2323 uint64_t amask, tabl, tabr;
2324 int shift, imask, omask;
2326 if (cc) {
2327 tcg_gen_mov_tl(cpu_cc_src, s1);
2328 tcg_gen_mov_tl(cpu_cc_src2, s2);
2329 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2330 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2331 dc->cc_op = CC_OP_SUB;
2334 /* Theory of operation: there are two tables, left and right (not to
2335 be confused with the left and right versions of the opcode). These
2336 are indexed by the low 3 bits of the inputs. To make things "easy",
2337 these tables are loaded into two constants, TABL and TABR below.
2338 The operation index = (input & imask) << shift calculates the index
2339 into the constant, while val = (table >> index) & omask calculates
2340 the value we're looking for. */
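/* Illustrative example: edge8 "left" with (s1 & 7) == 1 gives
   index = 1 << 3 = 8, and (TABL >> 8) & 0xff = 0xfe, the edge mask
   for a start offset of one byte. */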
2341 switch (width) {
2342 case 8:
2343 imask = 0x7;
2344 shift = 3;
2345 omask = 0xff;
2346 if (left) {
2347 tabl = 0x80c0e0f0f8fcfeffULL;
2348 tabr = 0xff7f3f1f0f070301ULL;
2349 } else {
2350 tabl = 0x0103070f1f3f7fffULL;
2351 tabr = 0xfffefcf8f0e0c080ULL;
2353 break;
2354 case 16:
2355 imask = 0x6;
2356 shift = 1;
2357 omask = 0xf;
2358 if (left) {
2359 tabl = 0x8cef;
2360 tabr = 0xf731;
2361 } else {
2362 tabl = 0x137f;
2363 tabr = 0xfec8;
2365 break;
2366 case 32:
2367 imask = 0x4;
2368 shift = 0;
2369 omask = 0x3;
2370 if (left) {
2371 tabl = (2 << 2) | 3;
2372 tabr = (3 << 2) | 1;
2373 } else {
2374 tabl = (1 << 2) | 3;
2375 tabr = (3 << 2) | 2;
2377 break;
2378 default:
2379 abort();
2382 lo1 = tcg_temp_new();
2383 lo2 = tcg_temp_new();
2384 tcg_gen_andi_tl(lo1, s1, imask);
2385 tcg_gen_andi_tl(lo2, s2, imask);
2386 tcg_gen_shli_tl(lo1, lo1, shift);
2387 tcg_gen_shli_tl(lo2, lo2, shift);
2389 t1 = tcg_const_tl(tabl);
2390 t2 = tcg_const_tl(tabr);
2391 tcg_gen_shr_tl(lo1, t1, lo1);
2392 tcg_gen_shr_tl(lo2, t2, lo2);
2393 tcg_gen_andi_tl(dst, lo1, omask);
2394 tcg_gen_andi_tl(lo2, lo2, omask);
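/* Compare only the aligned doubleword addresses: mask off the low three
   bits, and under 32-bit address masking the high 32 bits as well. */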
2396 amask = -8;
2397 if (AM_CHECK(dc)) {
2398 amask &= 0xffffffffULL;
2400 tcg_gen_andi_tl(s1, s1, amask);
2401 tcg_gen_andi_tl(s2, s2, amask);
2403 /* We want to compute
2404 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2405 We've already done dst = lo1, so this reduces to
2406 dst &= (s1 == s2 ? -1 : lo2)
2407 which we perform by
2408 lo2 |= -(s1 == s2)
2409 dst &= lo2
2411 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2412 tcg_gen_neg_tl(t1, t1);
2413 tcg_gen_or_tl(lo2, lo2, t1);
2414 tcg_gen_and_tl(dst, dst, lo2);
2416 tcg_temp_free(lo1);
2417 tcg_temp_free(lo2);
2418 tcg_temp_free(t1);
2419 tcg_temp_free(t2);
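/* alignaddr: dst = (s1 + s2) & ~7, with the low three bits of the sum
   recorded in GSR.align for a later faligndata.  The "little" variant
   stores the two's complement of that offset instead.  E.g. a sum of
   0x1003 yields dst = 0x1000 and GSR.align = 3. */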
2422 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2424 TCGv tmp = tcg_temp_new();
2426 tcg_gen_add_tl(tmp, s1, s2);
2427 tcg_gen_andi_tl(dst, tmp, -8);
2428 if (left) {
2429 tcg_gen_neg_tl(tmp, tmp);
2431 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2433 tcg_temp_free(tmp);
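/* faligndata: treat s1:s2 as a 16-byte value and extract the eight bytes
   starting at byte offset GSR.align; e.g. with GSR.align == 3 the result
   is (s1 << 24) | (s2 >> 40). */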
2436 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2438 TCGv t1, t2, shift;
2440 t1 = tcg_temp_new();
2441 t2 = tcg_temp_new();
2442 shift = tcg_temp_new();
2444 tcg_gen_andi_tl(shift, gsr, 7);
2445 tcg_gen_shli_tl(shift, shift, 3);
2446 tcg_gen_shl_tl(t1, s1, shift);
2448 /* A shift of 64 does not produce 0 in TCG.  Split this into a variable
2449 shift of up to 63 followed by a constant shift of 1. */
2450 tcg_gen_xori_tl(shift, shift, 63);
2451 tcg_gen_shr_tl(t2, s2, shift);
2452 tcg_gen_shri_tl(t2, t2, 1);
2454 tcg_gen_or_tl(dst, t1, t2);
2456 tcg_temp_free(t1);
2457 tcg_temp_free(t2);
2458 tcg_temp_free(shift);
2460 #endif
2462 #define CHECK_IU_FEATURE(dc, FEATURE) \
2463 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2464 goto illegal_insn;
2465 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2466 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2467 goto nfpu_insn;
2469 /* before an instruction, dc->pc must be static */
2470 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2472 unsigned int opc, rs1, rs2, rd;
2473 TCGv cpu_src1, cpu_src2;
2474 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2475 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2476 target_long simm;
2478 opc = GET_FIELD(insn, 0, 1);
2479 rd = GET_FIELD(insn, 2, 6);
2481 switch (opc) {
2482 case 0: /* branches/sethi */
2484 unsigned int xop = GET_FIELD(insn, 7, 9);
2485 int32_t target;
2486 switch (xop) {
2487 #ifdef TARGET_SPARC64
2488 case 0x1: /* V9 BPcc */
2490 int cc;
2492 target = GET_FIELD_SP(insn, 0, 18);
2493 target = sign_extend(target, 19);
2494 target <<= 2;
2495 cc = GET_FIELD_SP(insn, 20, 21);
2496 if (cc == 0)
2497 do_branch(dc, target, insn, 0);
2498 else if (cc == 2)
2499 do_branch(dc, target, insn, 1);
2500 else
2501 goto illegal_insn;
2502 goto jmp_insn;
2504 case 0x3: /* V9 BPr */
2506 target = GET_FIELD_SP(insn, 0, 13) |
2507 (GET_FIELD_SP(insn, 20, 21) << 14);
2508 target = sign_extend(target, 16);
2509 target <<= 2;
2510 cpu_src1 = get_src1(dc, insn);
2511 do_branch_reg(dc, target, insn, cpu_src1);
2512 goto jmp_insn;
2514 case 0x5: /* V9 FBPcc */
2516 int cc = GET_FIELD_SP(insn, 20, 21);
2517 if (gen_trap_ifnofpu(dc)) {
2518 goto jmp_insn;
2520 target = GET_FIELD_SP(insn, 0, 18);
2521 target = sign_extend(target, 19);
2522 target <<= 2;
2523 do_fbranch(dc, target, insn, cc);
2524 goto jmp_insn;
2526 #else
2527 case 0x7: /* CBN+x */
2529 goto ncp_insn;
2531 #endif
2532 case 0x2: /* BN+x */
2534 target = GET_FIELD(insn, 10, 31);
2535 target = sign_extend(target, 22);
2536 target <<= 2;
2537 do_branch(dc, target, insn, 0);
2538 goto jmp_insn;
2540 case 0x6: /* FBN+x */
2542 if (gen_trap_ifnofpu(dc)) {
2543 goto jmp_insn;
2545 target = GET_FIELD(insn, 10, 31);
2546 target = sign_extend(target, 22);
2547 target <<= 2;
2548 do_fbranch(dc, target, insn, 0);
2549 goto jmp_insn;
2551 case 0x4: /* SETHI */
2552 /* Special-case %g0 because that's the canonical nop. */
2553 if (rd) {
2554 uint32_t value = GET_FIELD(insn, 10, 31);
2555 TCGv t = gen_dest_gpr(dc, rd);
2556 tcg_gen_movi_tl(t, value << 10);
2557 gen_store_gpr(dc, rd, t);
2559 break;
2560 case 0x0: /* UNIMPL */
2561 default:
2562 goto illegal_insn;
2564 break;
2566 break;
2567 case 1: /*CALL*/
2569 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2570 TCGv o7 = gen_dest_gpr(dc, 15);
2572 tcg_gen_movi_tl(o7, dc->pc);
2573 gen_store_gpr(dc, 15, o7);
2574 target += dc->pc;
2575 gen_mov_pc_npc(dc);
2576 #ifdef TARGET_SPARC64
2577 if (unlikely(AM_CHECK(dc))) {
2578 target &= 0xffffffffULL;
2580 #endif
2581 dc->npc = target;
2583 goto jmp_insn;
2584 case 2: /* FPU & Logical Operations */
2586 unsigned int xop = GET_FIELD(insn, 7, 12);
2587 TCGv cpu_dst = get_temp_tl(dc);
2588 TCGv cpu_tmp0;
2590 if (xop == 0x3a) { /* generate trap */
2591 int cond = GET_FIELD(insn, 3, 6);
2592 TCGv_i32 trap;
2593 TCGLabel *l1 = NULL;
2594 int mask;
2596 if (cond == 0) {
2597 /* Trap never. */
2598 break;
2601 save_state(dc);
2603 if (cond != 8) {
2604 /* Conditional trap. */
2605 DisasCompare cmp;
2606 #ifdef TARGET_SPARC64
2607 /* V9 icc/xcc */
2608 int cc = GET_FIELD_SP(insn, 11, 12);
2609 if (cc == 0) {
2610 gen_compare(&cmp, 0, cond, dc);
2611 } else if (cc == 2) {
2612 gen_compare(&cmp, 1, cond, dc);
2613 } else {
2614 goto illegal_insn;
2616 #else
2617 gen_compare(&cmp, 0, cond, dc);
2618 #endif
2619 l1 = gen_new_label();
2620 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2621 cmp.c1, cmp.c2, l1);
2622 free_compare(&cmp);
2625 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2626 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2628 /* Don't use the normal temporaries, as they may well have
2629 gone out of scope with the branch above. While we're
2630 doing that we might as well pre-truncate to 32-bit. */
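/* The trap number is (rs1 + rs2_or_imm), masked to 7 bits (8 bits with
   the UA2005 hypervisor feature), plus the TT_TRAP base. */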
2631 trap = tcg_temp_new_i32();
2633 rs1 = GET_FIELD_SP(insn, 14, 18);
2634 if (IS_IMM) {
2635 rs2 = GET_FIELD_SP(insn, 0, 6);
2636 if (rs1 == 0) {
2637 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2638 /* Signal that the trap value is fully constant. */
2639 mask = 0;
2640 } else {
2641 TCGv t1 = gen_load_gpr(dc, rs1);
2642 tcg_gen_trunc_tl_i32(trap, t1);
2643 tcg_gen_addi_i32(trap, trap, rs2);
2645 } else {
2646 TCGv t1, t2;
2647 rs2 = GET_FIELD_SP(insn, 0, 4);
2648 t1 = gen_load_gpr(dc, rs1);
2649 t2 = gen_load_gpr(dc, rs2);
2650 tcg_gen_add_tl(t1, t1, t2);
2651 tcg_gen_trunc_tl_i32(trap, t1);
2653 if (mask != 0) {
2654 tcg_gen_andi_i32(trap, trap, mask);
2655 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2658 gen_helper_raise_exception(cpu_env, trap);
2659 tcg_temp_free_i32(trap);
2661 if (cond == 8) {
2662 /* An unconditional trap ends the TB. */
2663 dc->is_br = 1;
2664 goto jmp_insn;
2665 } else {
2666 /* A conditional trap falls through to the next insn. */
2667 gen_set_label(l1);
2668 break;
2670 } else if (xop == 0x28) {
2671 rs1 = GET_FIELD(insn, 13, 17);
2672 switch(rs1) {
2673 case 0: /* rdy */
2674 #ifndef TARGET_SPARC64
2675 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2676 manual, rdy on the microSPARC
2677 II */
2678 case 0x0f: /* stbar in the SPARCv8 manual,
2679 rdy on the microSPARC II */
2680 case 0x10 ... 0x1f: /* implementation-dependent in the
2681 SPARCv8 manual, rdy on the
2682 microSPARC II */
2683 /* Read Asr17 */
2684 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2685 TCGv t = gen_dest_gpr(dc, rd);
2686 /* Read Asr17 for a Leon3 monoprocessor */
2687 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2688 gen_store_gpr(dc, rd, t);
2689 break;
2691 #endif
2692 gen_store_gpr(dc, rd, cpu_y);
2693 break;
2694 #ifdef TARGET_SPARC64
2695 case 0x2: /* V9 rdccr */
2696 update_psr(dc);
2697 gen_helper_rdccr(cpu_dst, cpu_env);
2698 gen_store_gpr(dc, rd, cpu_dst);
2699 break;
2700 case 0x3: /* V9 rdasi */
2701 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2702 gen_store_gpr(dc, rd, cpu_dst);
2703 break;
2704 case 0x4: /* V9 rdtick */
2706 TCGv_ptr r_tickptr;
2707 TCGv_i32 r_const;
2709 r_tickptr = tcg_temp_new_ptr();
2710 r_const = tcg_const_i32(dc->mem_idx);
2711 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2712 offsetof(CPUSPARCState, tick));
2713 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2714 r_const);
2715 tcg_temp_free_ptr(r_tickptr);
2716 tcg_temp_free_i32(r_const);
2717 gen_store_gpr(dc, rd, cpu_dst);
2719 break;
2720 case 0x5: /* V9 rdpc */
2722 TCGv t = gen_dest_gpr(dc, rd);
2723 if (unlikely(AM_CHECK(dc))) {
2724 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2725 } else {
2726 tcg_gen_movi_tl(t, dc->pc);
2728 gen_store_gpr(dc, rd, t);
2730 break;
2731 case 0x6: /* V9 rdfprs */
2732 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2733 gen_store_gpr(dc, rd, cpu_dst);
2734 break;
2735 case 0xf: /* V9 membar */
2736 break; /* no effect */
2737 case 0x13: /* Graphics Status */
2738 if (gen_trap_ifnofpu(dc)) {
2739 goto jmp_insn;
2741 gen_store_gpr(dc, rd, cpu_gsr);
2742 break;
2743 case 0x16: /* Softint */
2744 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2745 gen_store_gpr(dc, rd, cpu_dst);
2746 break;
2747 case 0x17: /* Tick compare */
2748 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2749 break;
2750 case 0x18: /* System tick */
2752 TCGv_ptr r_tickptr;
2753 TCGv_i32 r_const;
2755 r_tickptr = tcg_temp_new_ptr();
2756 r_const = tcg_const_i32(dc->mem_idx);
2757 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2758 offsetof(CPUSPARCState, stick));
2759 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2760 r_const);
2761 tcg_temp_free_ptr(r_tickptr);
2762 tcg_temp_free_i32(r_const);
2763 gen_store_gpr(dc, rd, cpu_dst);
2765 break;
2766 case 0x19: /* System tick compare */
2767 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2768 break;
2769 case 0x10: /* Performance Control */
2770 case 0x11: /* Performance Instrumentation Counter */
2771 case 0x12: /* Dispatch Control */
2772 case 0x14: /* Softint set, WO */
2773 case 0x15: /* Softint clear, WO */
2774 #endif
2775 default:
2776 goto illegal_insn;
2778 #if !defined(CONFIG_USER_ONLY)
2779 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2780 #ifndef TARGET_SPARC64
2781 if (!supervisor(dc)) {
2782 goto priv_insn;
2784 update_psr(dc);
2785 gen_helper_rdpsr(cpu_dst, cpu_env);
2786 #else
2787 CHECK_IU_FEATURE(dc, HYPV);
2788 if (!hypervisor(dc))
2789 goto priv_insn;
2790 rs1 = GET_FIELD(insn, 13, 17);
2791 switch (rs1) {
2792 case 0: // hpstate
2793 // gen_op_rdhpstate();
2794 break;
2795 case 1: // htstate
2796 // gen_op_rdhtstate();
2797 break;
2798 case 3: // hintp
2799 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2800 break;
2801 case 5: // htba
2802 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2803 break;
2804 case 6: // hver
2805 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2806 break;
2807 case 31: // hstick_cmpr
2808 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2809 break;
2810 default:
2811 goto illegal_insn;
2813 #endif
2814 gen_store_gpr(dc, rd, cpu_dst);
2815 break;
2816 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2817 if (!supervisor(dc)) {
2818 goto priv_insn;
2820 cpu_tmp0 = get_temp_tl(dc);
2821 #ifdef TARGET_SPARC64
2822 rs1 = GET_FIELD(insn, 13, 17);
2823 switch (rs1) {
2824 case 0: // tpc
2826 TCGv_ptr r_tsptr;
2828 r_tsptr = tcg_temp_new_ptr();
2829 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2830 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2831 offsetof(trap_state, tpc));
2832 tcg_temp_free_ptr(r_tsptr);
2834 break;
2835 case 1: // tnpc
2837 TCGv_ptr r_tsptr;
2839 r_tsptr = tcg_temp_new_ptr();
2840 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2841 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2842 offsetof(trap_state, tnpc));
2843 tcg_temp_free_ptr(r_tsptr);
2845 break;
2846 case 2: // tstate
2848 TCGv_ptr r_tsptr;
2850 r_tsptr = tcg_temp_new_ptr();
2851 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2852 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2853 offsetof(trap_state, tstate));
2854 tcg_temp_free_ptr(r_tsptr);
2856 break;
2857 case 3: // tt
2859 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2861 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2862 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2863 offsetof(trap_state, tt));
2864 tcg_temp_free_ptr(r_tsptr);
2866 break;
2867 case 4: // tick
2869 TCGv_ptr r_tickptr;
2870 TCGv_i32 r_const;
2872 r_tickptr = tcg_temp_new_ptr();
2873 r_const = tcg_const_i32(dc->mem_idx);
2874 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2875 offsetof(CPUSPARCState, tick));
2876 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2877 r_tickptr, r_const);
2878 tcg_temp_free_ptr(r_tickptr);
2879 tcg_temp_free_i32(r_const);
2881 break;
2882 case 5: // tba
2883 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2884 break;
2885 case 6: // pstate
2886 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2887 offsetof(CPUSPARCState, pstate));
2888 break;
2889 case 7: // tl
2890 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2891 offsetof(CPUSPARCState, tl));
2892 break;
2893 case 8: // pil
2894 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2895 offsetof(CPUSPARCState, psrpil));
2896 break;
2897 case 9: // cwp
2898 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2899 break;
2900 case 10: // cansave
2901 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2902 offsetof(CPUSPARCState, cansave));
2903 break;
2904 case 11: // canrestore
2905 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2906 offsetof(CPUSPARCState, canrestore));
2907 break;
2908 case 12: // cleanwin
2909 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2910 offsetof(CPUSPARCState, cleanwin));
2911 break;
2912 case 13: // otherwin
2913 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2914 offsetof(CPUSPARCState, otherwin));
2915 break;
2916 case 14: // wstate
2917 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2918 offsetof(CPUSPARCState, wstate));
2919 break;
2920 case 16: // UA2005 gl
2921 CHECK_IU_FEATURE(dc, GL);
2922 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2923 offsetof(CPUSPARCState, gl));
2924 break;
2925 case 26: // UA2005 strand status
2926 CHECK_IU_FEATURE(dc, HYPV);
2927 if (!hypervisor(dc))
2928 goto priv_insn;
2929 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2930 break;
2931 case 31: // ver
2932 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2933 break;
2934 case 15: // fq
2935 default:
2936 goto illegal_insn;
2938 #else
2939 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2940 #endif
2941 gen_store_gpr(dc, rd, cpu_tmp0);
2942 break;
2943 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2944 #ifdef TARGET_SPARC64
2945 save_state(dc);
2946 gen_helper_flushw(cpu_env);
2947 #else
2948 if (!supervisor(dc))
2949 goto priv_insn;
2950 gen_store_gpr(dc, rd, cpu_tbr);
2951 #endif
2952 break;
2953 #endif
2954 } else if (xop == 0x34) { /* FPU Operations */
2955 if (gen_trap_ifnofpu(dc)) {
2956 goto jmp_insn;
2958 gen_op_clear_ieee_excp_and_FTT();
2959 rs1 = GET_FIELD(insn, 13, 17);
2960 rs2 = GET_FIELD(insn, 27, 31);
2961 xop = GET_FIELD(insn, 18, 26);
2962 save_state(dc);
2963 switch (xop) {
2964 case 0x1: /* fmovs */
2965 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2966 gen_store_fpr_F(dc, rd, cpu_src1_32);
2967 break;
2968 case 0x5: /* fnegs */
2969 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2970 break;
2971 case 0x9: /* fabss */
2972 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2973 break;
2974 case 0x29: /* fsqrts */
2975 CHECK_FPU_FEATURE(dc, FSQRT);
2976 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2977 break;
2978 case 0x2a: /* fsqrtd */
2979 CHECK_FPU_FEATURE(dc, FSQRT);
2980 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2981 break;
2982 case 0x2b: /* fsqrtq */
2983 CHECK_FPU_FEATURE(dc, FLOAT128);
2984 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2985 break;
2986 case 0x41: /* fadds */
2987 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2988 break;
2989 case 0x42: /* faddd */
2990 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2991 break;
2992 case 0x43: /* faddq */
2993 CHECK_FPU_FEATURE(dc, FLOAT128);
2994 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2995 break;
2996 case 0x45: /* fsubs */
2997 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2998 break;
2999 case 0x46: /* fsubd */
3000 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3001 break;
3002 case 0x47: /* fsubq */
3003 CHECK_FPU_FEATURE(dc, FLOAT128);
3004 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3005 break;
3006 case 0x49: /* fmuls */
3007 CHECK_FPU_FEATURE(dc, FMUL);
3008 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3009 break;
3010 case 0x4a: /* fmuld */
3011 CHECK_FPU_FEATURE(dc, FMUL);
3012 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3013 break;
3014 case 0x4b: /* fmulq */
3015 CHECK_FPU_FEATURE(dc, FLOAT128);
3016 CHECK_FPU_FEATURE(dc, FMUL);
3017 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3018 break;
3019 case 0x4d: /* fdivs */
3020 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3021 break;
3022 case 0x4e: /* fdivd */
3023 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3024 break;
3025 case 0x4f: /* fdivq */
3026 CHECK_FPU_FEATURE(dc, FLOAT128);
3027 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3028 break;
3029 case 0x69: /* fsmuld */
3030 CHECK_FPU_FEATURE(dc, FSMULD);
3031 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3032 break;
3033 case 0x6e: /* fdmulq */
3034 CHECK_FPU_FEATURE(dc, FLOAT128);
3035 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3036 break;
3037 case 0xc4: /* fitos */
3038 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3039 break;
3040 case 0xc6: /* fdtos */
3041 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3042 break;
3043 case 0xc7: /* fqtos */
3044 CHECK_FPU_FEATURE(dc, FLOAT128);
3045 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3046 break;
3047 case 0xc8: /* fitod */
3048 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3049 break;
3050 case 0xc9: /* fstod */
3051 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3052 break;
3053 case 0xcb: /* fqtod */
3054 CHECK_FPU_FEATURE(dc, FLOAT128);
3055 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3056 break;
3057 case 0xcc: /* fitoq */
3058 CHECK_FPU_FEATURE(dc, FLOAT128);
3059 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3060 break;
3061 case 0xcd: /* fstoq */
3062 CHECK_FPU_FEATURE(dc, FLOAT128);
3063 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3064 break;
3065 case 0xce: /* fdtoq */
3066 CHECK_FPU_FEATURE(dc, FLOAT128);
3067 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3068 break;
3069 case 0xd1: /* fstoi */
3070 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3071 break;
3072 case 0xd2: /* fdtoi */
3073 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3074 break;
3075 case 0xd3: /* fqtoi */
3076 CHECK_FPU_FEATURE(dc, FLOAT128);
3077 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3078 break;
3079 #ifdef TARGET_SPARC64
3080 case 0x2: /* V9 fmovd */
3081 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3082 gen_store_fpr_D(dc, rd, cpu_src1_64);
3083 break;
3084 case 0x3: /* V9 fmovq */
3085 CHECK_FPU_FEATURE(dc, FLOAT128);
3086 gen_move_Q(rd, rs2);
3087 break;
3088 case 0x6: /* V9 fnegd */
3089 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3090 break;
3091 case 0x7: /* V9 fnegq */
3092 CHECK_FPU_FEATURE(dc, FLOAT128);
3093 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3094 break;
3095 case 0xa: /* V9 fabsd */
3096 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3097 break;
3098 case 0xb: /* V9 fabsq */
3099 CHECK_FPU_FEATURE(dc, FLOAT128);
3100 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3101 break;
3102 case 0x81: /* V9 fstox */
3103 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3104 break;
3105 case 0x82: /* V9 fdtox */
3106 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3107 break;
3108 case 0x83: /* V9 fqtox */
3109 CHECK_FPU_FEATURE(dc, FLOAT128);
3110 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3111 break;
3112 case 0x84: /* V9 fxtos */
3113 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3114 break;
3115 case 0x88: /* V9 fxtod */
3116 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3117 break;
3118 case 0x8c: /* V9 fxtoq */
3119 CHECK_FPU_FEATURE(dc, FLOAT128);
3120 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3121 break;
3122 #endif
3123 default:
3124 goto illegal_insn;
3126 } else if (xop == 0x35) { /* FPU Operations */
3127 #ifdef TARGET_SPARC64
3128 int cond;
3129 #endif
3130 if (gen_trap_ifnofpu(dc)) {
3131 goto jmp_insn;
3133 gen_op_clear_ieee_excp_and_FTT();
3134 rs1 = GET_FIELD(insn, 13, 17);
3135 rs2 = GET_FIELD(insn, 27, 31);
3136 xop = GET_FIELD(insn, 18, 26);
3137 save_state(dc);
3139 #ifdef TARGET_SPARC64
3140 #define FMOVR(sz) \
3141 do { \
3142 DisasCompare cmp; \
3143 cond = GET_FIELD_SP(insn, 10, 12); \
3144 cpu_src1 = get_src1(dc, insn); \
3145 gen_compare_reg(&cmp, cond, cpu_src1); \
3146 gen_fmov##sz(dc, &cmp, rd, rs2); \
3147 free_compare(&cmp); \
3148 } while (0)
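/* FMOVR conditionally copies an FP register based on a comparison of
   integer register rs1 against zero (eq, le, l, ne, g, ge). */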
3150 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3151 FMOVR(s);
3152 break;
3153 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3154 FMOVR(d);
3155 break;
3156 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3157 CHECK_FPU_FEATURE(dc, FLOAT128);
3158 FMOVR(q);
3159 break;
3161 #undef FMOVR
3162 #endif
3163 switch (xop) {
3164 #ifdef TARGET_SPARC64
3165 #define FMOVCC(fcc, sz) \
3166 do { \
3167 DisasCompare cmp; \
3168 cond = GET_FIELD_SP(insn, 14, 17); \
3169 gen_fcompare(&cmp, fcc, cond); \
3170 gen_fmov##sz(dc, &cmp, rd, rs2); \
3171 free_compare(&cmp); \
3172 } while (0)
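/* The fmov*cc cases below select on a floating-point condition field;
   the low opcode bits pick the operand size and fcc picks %fcc0..%fcc3. */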
3174 case 0x001: /* V9 fmovscc %fcc0 */
3175 FMOVCC(0, s);
3176 break;
3177 case 0x002: /* V9 fmovdcc %fcc0 */
3178 FMOVCC(0, d);
3179 break;
3180 case 0x003: /* V9 fmovqcc %fcc0 */
3181 CHECK_FPU_FEATURE(dc, FLOAT128);
3182 FMOVCC(0, q);
3183 break;
3184 case 0x041: /* V9 fmovscc %fcc1 */
3185 FMOVCC(1, s);
3186 break;
3187 case 0x042: /* V9 fmovdcc %fcc1 */
3188 FMOVCC(1, d);
3189 break;
3190 case 0x043: /* V9 fmovqcc %fcc1 */
3191 CHECK_FPU_FEATURE(dc, FLOAT128);
3192 FMOVCC(1, q);
3193 break;
3194 case 0x081: /* V9 fmovscc %fcc2 */
3195 FMOVCC(2, s);
3196 break;
3197 case 0x082: /* V9 fmovdcc %fcc2 */
3198 FMOVCC(2, d);
3199 break;
3200 case 0x083: /* V9 fmovqcc %fcc2 */
3201 CHECK_FPU_FEATURE(dc, FLOAT128);
3202 FMOVCC(2, q);
3203 break;
3204 case 0x0c1: /* V9 fmovscc %fcc3 */
3205 FMOVCC(3, s);
3206 break;
3207 case 0x0c2: /* V9 fmovdcc %fcc3 */
3208 FMOVCC(3, d);
3209 break;
3210 case 0x0c3: /* V9 fmovqcc %fcc3 */
3211 CHECK_FPU_FEATURE(dc, FLOAT128);
3212 FMOVCC(3, q);
3213 break;
3214 #undef FMOVCC
3215 #define FMOVCC(xcc, sz) \
3216 do { \
3217 DisasCompare cmp; \
3218 cond = GET_FIELD_SP(insn, 14, 17); \
3219 gen_compare(&cmp, xcc, cond, dc); \
3220 gen_fmov##sz(dc, &cmp, rd, rs2); \
3221 free_compare(&cmp); \
3222 } while (0)
3224 case 0x101: /* V9 fmovscc %icc */
3225 FMOVCC(0, s);
3226 break;
3227 case 0x102: /* V9 fmovdcc %icc */
3228 FMOVCC(0, d);
3229 break;
3230 case 0x103: /* V9 fmovqcc %icc */
3231 CHECK_FPU_FEATURE(dc, FLOAT128);
3232 FMOVCC(0, q);
3233 break;
3234 case 0x181: /* V9 fmovscc %xcc */
3235 FMOVCC(1, s);
3236 break;
3237 case 0x182: /* V9 fmovdcc %xcc */
3238 FMOVCC(1, d);
3239 break;
3240 case 0x183: /* V9 fmovqcc %xcc */
3241 CHECK_FPU_FEATURE(dc, FLOAT128);
3242 FMOVCC(1, q);
3243 break;
3244 #undef FMOVCC
3245 #endif
3246 case 0x51: /* fcmps, V9 %fcc */
3247 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3248 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3249 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3250 break;
3251 case 0x52: /* fcmpd, V9 %fcc */
3252 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3253 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3254 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3255 break;
3256 case 0x53: /* fcmpq, V9 %fcc */
3257 CHECK_FPU_FEATURE(dc, FLOAT128);
3258 gen_op_load_fpr_QT0(QFPREG(rs1));
3259 gen_op_load_fpr_QT1(QFPREG(rs2));
3260 gen_op_fcmpq(rd & 3);
3261 break;
3262 case 0x55: /* fcmpes, V9 %fcc */
3263 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3264 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3265 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3266 break;
3267 case 0x56: /* fcmped, V9 %fcc */
3268 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3269 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3270 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3271 break;
3272 case 0x57: /* fcmpeq, V9 %fcc */
3273 CHECK_FPU_FEATURE(dc, FLOAT128);
3274 gen_op_load_fpr_QT0(QFPREG(rs1));
3275 gen_op_load_fpr_QT1(QFPREG(rs2));
3276 gen_op_fcmpeq(rd & 3);
3277 break;
3278 default:
3279 goto illegal_insn;
3281 } else if (xop == 0x2) {
3282 TCGv dst = gen_dest_gpr(dc, rd);
3283 rs1 = GET_FIELD(insn, 13, 17);
3284 if (rs1 == 0) {
3285 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3286 if (IS_IMM) { /* immediate */
3287 simm = GET_FIELDs(insn, 19, 31);
3288 tcg_gen_movi_tl(dst, simm);
3289 gen_store_gpr(dc, rd, dst);
3290 } else { /* register */
3291 rs2 = GET_FIELD(insn, 27, 31);
3292 if (rs2 == 0) {
3293 tcg_gen_movi_tl(dst, 0);
3294 gen_store_gpr(dc, rd, dst);
3295 } else {
3296 cpu_src2 = gen_load_gpr(dc, rs2);
3297 gen_store_gpr(dc, rd, cpu_src2);
3300 } else {
3301 cpu_src1 = get_src1(dc, insn);
3302 if (IS_IMM) { /* immediate */
3303 simm = GET_FIELDs(insn, 19, 31);
3304 tcg_gen_ori_tl(dst, cpu_src1, simm);
3305 gen_store_gpr(dc, rd, dst);
3306 } else { /* register */
3307 rs2 = GET_FIELD(insn, 27, 31);
3308 if (rs2 == 0) {
3309 /* mov shortcut: or x, %g0, y -> mov x, y */
3310 gen_store_gpr(dc, rd, cpu_src1);
3311 } else {
3312 cpu_src2 = gen_load_gpr(dc, rs2);
3313 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3314 gen_store_gpr(dc, rd, dst);
3318 #ifdef TARGET_SPARC64
3319 } else if (xop == 0x25) { /* sll, V9 sllx */
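/* Insn bit 12 (the "X" bit) selects the 64-bit shift with a 6-bit count;
   without it only a 5-bit count is used. */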
3320 cpu_src1 = get_src1(dc, insn);
3321 if (IS_IMM) { /* immediate */
3322 simm = GET_FIELDs(insn, 20, 31);
3323 if (insn & (1 << 12)) {
3324 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3325 } else {
3326 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3328 } else { /* register */
3329 rs2 = GET_FIELD(insn, 27, 31);
3330 cpu_src2 = gen_load_gpr(dc, rs2);
3331 cpu_tmp0 = get_temp_tl(dc);
3332 if (insn & (1 << 12)) {
3333 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3334 } else {
3335 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3337 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3339 gen_store_gpr(dc, rd, cpu_dst);
3340 } else if (xop == 0x26) { /* srl, V9 srlx */
3341 cpu_src1 = get_src1(dc, insn);
3342 if (IS_IMM) { /* immediate */
3343 simm = GET_FIELDs(insn, 20, 31);
3344 if (insn & (1 << 12)) {
3345 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3346 } else {
3347 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3348 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3350 } else { /* register */
3351 rs2 = GET_FIELD(insn, 27, 31);
3352 cpu_src2 = gen_load_gpr(dc, rs2);
3353 cpu_tmp0 = get_temp_tl(dc);
3354 if (insn & (1 << 12)) {
3355 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3356 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3357 } else {
3358 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3359 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3360 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3363 gen_store_gpr(dc, rd, cpu_dst);
3364 } else if (xop == 0x27) { /* sra, V9 srax */
3365 cpu_src1 = get_src1(dc, insn);
3366 if (IS_IMM) { /* immediate */
3367 simm = GET_FIELDs(insn, 20, 31);
3368 if (insn & (1 << 12)) {
3369 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3370 } else {
3371 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3372 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3374 } else { /* register */
3375 rs2 = GET_FIELD(insn, 27, 31);
3376 cpu_src2 = gen_load_gpr(dc, rs2);
3377 cpu_tmp0 = get_temp_tl(dc);
3378 if (insn & (1 << 12)) {
3379 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3380 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3381 } else {
3382 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3383 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3384 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3387 gen_store_gpr(dc, rd, cpu_dst);
3388 #endif
3389 } else if (xop < 0x36) {
3390 if (xop < 0x20) {
3391 cpu_src1 = get_src1(dc, insn);
3392 cpu_src2 = get_src2(dc, insn);
3393 switch (xop & ~0x10) {
3394 case 0x0: /* add */
3395 if (xop & 0x10) {
3396 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3397 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3398 dc->cc_op = CC_OP_ADD;
3399 } else {
3400 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3402 break;
3403 case 0x1: /* and */
3404 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3405 if (xop & 0x10) {
3406 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3407 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3408 dc->cc_op = CC_OP_LOGIC;
3410 break;
3411 case 0x2: /* or */
3412 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3413 if (xop & 0x10) {
3414 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3415 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3416 dc->cc_op = CC_OP_LOGIC;
3418 break;
3419 case 0x3: /* xor */
3420 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3421 if (xop & 0x10) {
3422 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3423 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3424 dc->cc_op = CC_OP_LOGIC;
3426 break;
3427 case 0x4: /* sub */
3428 if (xop & 0x10) {
3429 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3430 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3431 dc->cc_op = CC_OP_SUB;
3432 } else {
3433 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3435 break;
3436 case 0x5: /* andn */
3437 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3438 if (xop & 0x10) {
3439 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3440 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3441 dc->cc_op = CC_OP_LOGIC;
3443 break;
3444 case 0x6: /* orn */
3445 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3446 if (xop & 0x10) {
3447 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3448 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3449 dc->cc_op = CC_OP_LOGIC;
3451 break;
3452 case 0x7: /* xorn */
3453 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3454 if (xop & 0x10) {
3455 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3456 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3457 dc->cc_op = CC_OP_LOGIC;
3459 break;
3460 case 0x8: /* addx, V9 addc */
3461 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3462 (xop & 0x10));
3463 break;
3464 #ifdef TARGET_SPARC64
3465 case 0x9: /* V9 mulx */
3466 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3467 break;
3468 #endif
3469 case 0xa: /* umul */
3470 CHECK_IU_FEATURE(dc, MUL);
3471 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3472 if (xop & 0x10) {
3473 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3474 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3475 dc->cc_op = CC_OP_LOGIC;
3477 break;
3478 case 0xb: /* smul */
3479 CHECK_IU_FEATURE(dc, MUL);
3480 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3481 if (xop & 0x10) {
3482 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3483 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3484 dc->cc_op = CC_OP_LOGIC;
3486 break;
3487 case 0xc: /* subx, V9 subc */
3488 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3489 (xop & 0x10));
3490 break;
3491 #ifdef TARGET_SPARC64
3492 case 0xd: /* V9 udivx */
3493 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3494 break;
3495 #endif
3496 case 0xe: /* udiv */
3497 CHECK_IU_FEATURE(dc, DIV);
3498 if (xop & 0x10) {
3499 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3500 cpu_src2);
3501 dc->cc_op = CC_OP_DIV;
3502 } else {
3503 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3504 cpu_src2);
3506 break;
3507 case 0xf: /* sdiv */
3508 CHECK_IU_FEATURE(dc, DIV);
3509 if (xop & 0x10) {
3510 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3511 cpu_src2);
3512 dc->cc_op = CC_OP_DIV;
3513 } else {
3514 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3515 cpu_src2);
3517 break;
3518 default:
3519 goto illegal_insn;
3521 gen_store_gpr(dc, rd, cpu_dst);
3522 } else {
3523 cpu_src1 = get_src1(dc, insn);
3524 cpu_src2 = get_src2(dc, insn);
3525 switch (xop) {
3526 case 0x20: /* taddcc */
3527 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3528 gen_store_gpr(dc, rd, cpu_dst);
3529 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3530 dc->cc_op = CC_OP_TADD;
3531 break;
3532 case 0x21: /* tsubcc */
3533 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3534 gen_store_gpr(dc, rd, cpu_dst);
3535 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3536 dc->cc_op = CC_OP_TSUB;
3537 break;
3538 case 0x22: /* taddcctv */
3539 gen_helper_taddcctv(cpu_dst, cpu_env,
3540 cpu_src1, cpu_src2);
3541 gen_store_gpr(dc, rd, cpu_dst);
3542 dc->cc_op = CC_OP_TADDTV;
3543 break;
3544 case 0x23: /* tsubcctv */
3545 gen_helper_tsubcctv(cpu_dst, cpu_env,
3546 cpu_src1, cpu_src2);
3547 gen_store_gpr(dc, rd, cpu_dst);
3548 dc->cc_op = CC_OP_TSUBTV;
3549 break;
3550 case 0x24: /* mulscc */
3551 update_psr(dc);
3552 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3553 gen_store_gpr(dc, rd, cpu_dst);
3554 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3555 dc->cc_op = CC_OP_ADD;
3556 break;
3557 #ifndef TARGET_SPARC64
3558 case 0x25: /* sll */
3559 if (IS_IMM) { /* immediate */
3560 simm = GET_FIELDs(insn, 20, 31);
3561 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3562 } else { /* register */
3563 cpu_tmp0 = get_temp_tl(dc);
3564 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3565 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3567 gen_store_gpr(dc, rd, cpu_dst);
3568 break;
3569 case 0x26: /* srl */
3570 if (IS_IMM) { /* immediate */
3571 simm = GET_FIELDs(insn, 20, 31);
3572 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3573 } else { /* register */
3574 cpu_tmp0 = get_temp_tl(dc);
3575 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3576 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3578 gen_store_gpr(dc, rd, cpu_dst);
3579 break;
3580 case 0x27: /* sra */
3581 if (IS_IMM) { /* immediate */
3582 simm = GET_FIELDs(insn, 20, 31);
3583 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3584 } else { /* register */
3585 cpu_tmp0 = get_temp_tl(dc);
3586 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3587 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3589 gen_store_gpr(dc, rd, cpu_dst);
3590 break;
3591 #endif
3592 case 0x30:
3594 cpu_tmp0 = get_temp_tl(dc);
3595 switch(rd) {
3596 case 0: /* wry */
3597 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3598 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3599 break;
3600 #ifndef TARGET_SPARC64
3601 case 0x01 ... 0x0f: /* undefined in the
3602 SPARCv8 manual, nop
3603 on the microSPARC
3604 II */
3605 case 0x10 ... 0x1f: /* implementation-dependent
3606 in the SPARCv8
3607 manual, nop on the
3608 microSPARC II */
3609 if ((rd == 0x13) && (dc->def->features &
3610 CPU_FEATURE_POWERDOWN)) {
3611 /* LEON3 power-down */
3612 save_state(dc);
3613 gen_helper_power_down(cpu_env);
3615 break;
3616 #else
3617 case 0x2: /* V9 wrccr */
3618 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3619 gen_helper_wrccr(cpu_env, cpu_tmp0);
3620 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3621 dc->cc_op = CC_OP_FLAGS;
3622 break;
3623 case 0x3: /* V9 wrasi */
3624 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3625 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3626 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3627 break;
3628 case 0x6: /* V9 wrfprs */
3629 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3630 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
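/* Translation of FP insns depends on FPRS (FEF/DU/DL), so the TB is
   ended here and the following code retranslated with the new value. */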
3631 save_state(dc);
3632 gen_op_next_insn();
3633 tcg_gen_exit_tb(0);
3634 dc->is_br = 1;
3635 break;
3636 case 0xf: /* V9 sir, nop if user */
3637 #if !defined(CONFIG_USER_ONLY)
3638 if (supervisor(dc)) {
3639 ; // XXX
3641 #endif
3642 break;
3643 case 0x13: /* Graphics Status */
3644 if (gen_trap_ifnofpu(dc)) {
3645 goto jmp_insn;
3647 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3648 break;
3649 case 0x14: /* Softint set */
3650 if (!supervisor(dc))
3651 goto illegal_insn;
3652 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3653 gen_helper_set_softint(cpu_env, cpu_tmp0);
3654 break;
3655 case 0x15: /* Softint clear */
3656 if (!supervisor(dc))
3657 goto illegal_insn;
3658 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3659 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3660 break;
3661 case 0x16: /* Softint write */
3662 if (!supervisor(dc))
3663 goto illegal_insn;
3664 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3665 gen_helper_write_softint(cpu_env, cpu_tmp0);
3666 break;
3667 case 0x17: /* Tick compare */
3668 #if !defined(CONFIG_USER_ONLY)
3669 if (!supervisor(dc))
3670 goto illegal_insn;
3671 #endif
3673 TCGv_ptr r_tickptr;
3675 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3676 cpu_src2);
3677 r_tickptr = tcg_temp_new_ptr();
3678 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3679 offsetof(CPUSPARCState, tick));
3680 gen_helper_tick_set_limit(r_tickptr,
3681 cpu_tick_cmpr);
3682 tcg_temp_free_ptr(r_tickptr);
3684 break;
3685 case 0x18: /* System tick */
3686 #if !defined(CONFIG_USER_ONLY)
3687 if (!supervisor(dc))
3688 goto illegal_insn;
3689 #endif
3691 TCGv_ptr r_tickptr;
3693 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3694 cpu_src2);
3695 r_tickptr = tcg_temp_new_ptr();
3696 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3697 offsetof(CPUSPARCState, stick));
3698 gen_helper_tick_set_count(r_tickptr,
3699 cpu_tmp0);
3700 tcg_temp_free_ptr(r_tickptr);
3702 break;
3703 case 0x19: /* System tick compare */
3704 #if !defined(CONFIG_USER_ONLY)
3705 if (!supervisor(dc))
3706 goto illegal_insn;
3707 #endif
3709 TCGv_ptr r_tickptr;
3711 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3712 cpu_src2);
3713 r_tickptr = tcg_temp_new_ptr();
3714 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3715 offsetof(CPUSPARCState, stick));
3716 gen_helper_tick_set_limit(r_tickptr,
3717 cpu_stick_cmpr);
3718 tcg_temp_free_ptr(r_tickptr);
3720 break;
3722 case 0x10: /* Performance Control */
3723 case 0x11: /* Performance Instrumentation
3724 Counter */
3725 case 0x12: /* Dispatch Control */
3726 #endif
3727 default:
3728 goto illegal_insn;
3731 break;
3732 #if !defined(CONFIG_USER_ONLY)
3733 case 0x31: /* wrpsr, V9 saved, restored */
3735 if (!supervisor(dc))
3736 goto priv_insn;
3737 #ifdef TARGET_SPARC64
3738 switch (rd) {
3739 case 0:
3740 gen_helper_saved(cpu_env);
3741 break;
3742 case 1:
3743 gen_helper_restored(cpu_env);
3744 break;
3745 case 2: /* UA2005 allclean */
3746 case 3: /* UA2005 otherw */
3747 case 4: /* UA2005 normalw */
3748 case 5: /* UA2005 invalw */
3749 // XXX
3750 default:
3751 goto illegal_insn;
3753 #else
3754 cpu_tmp0 = get_temp_tl(dc);
3755 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3756 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3757 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3758 dc->cc_op = CC_OP_FLAGS;
3759 save_state(dc);
3760 gen_op_next_insn();
3761 tcg_gen_exit_tb(0);
3762 dc->is_br = 1;
3763 #endif
3765 break;
3766 case 0x32: /* wrwim, V9 wrpr */
3768 if (!supervisor(dc))
3769 goto priv_insn;
3770 cpu_tmp0 = get_temp_tl(dc);
3771 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3772 #ifdef TARGET_SPARC64
3773 switch (rd) {
3774 case 0: // tpc
3776 TCGv_ptr r_tsptr;
3778 r_tsptr = tcg_temp_new_ptr();
3779 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3780 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3781 offsetof(trap_state, tpc));
3782 tcg_temp_free_ptr(r_tsptr);
3784 break;
3785 case 1: // tnpc
3787 TCGv_ptr r_tsptr;
3789 r_tsptr = tcg_temp_new_ptr();
3790 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3791 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3792 offsetof(trap_state, tnpc));
3793 tcg_temp_free_ptr(r_tsptr);
3795 break;
3796 case 2: // tstate
3798 TCGv_ptr r_tsptr;
3800 r_tsptr = tcg_temp_new_ptr();
3801 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3802 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3803 offsetof(trap_state,
3804 tstate));
3805 tcg_temp_free_ptr(r_tsptr);
3807 break;
3808 case 3: // tt
3810 TCGv_ptr r_tsptr;
3812 r_tsptr = tcg_temp_new_ptr();
3813 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3814 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3815 offsetof(trap_state, tt));
3816 tcg_temp_free_ptr(r_tsptr);
3818 break;
3819 case 4: // tick
3821 TCGv_ptr r_tickptr;
3823 r_tickptr = tcg_temp_new_ptr();
3824 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3825 offsetof(CPUSPARCState, tick));
3826 gen_helper_tick_set_count(r_tickptr,
3827 cpu_tmp0);
3828 tcg_temp_free_ptr(r_tickptr);
3830 break;
3831 case 5: // tba
3832 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3833 break;
3834 case 6: // pstate
3835 save_state(dc);
3836 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3837 dc->npc = DYNAMIC_PC;
3838 break;
3839 case 7: // tl
3840 save_state(dc);
3841 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3842 offsetof(CPUSPARCState, tl));
3843 dc->npc = DYNAMIC_PC;
3844 break;
3845 case 8: // pil
3846 gen_helper_wrpil(cpu_env, cpu_tmp0);
3847 break;
3848 case 9: // cwp
3849 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3850 break;
3851 case 10: // cansave
3852 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3853 offsetof(CPUSPARCState,
3854 cansave));
3855 break;
3856 case 11: // canrestore
3857 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3858 offsetof(CPUSPARCState,
3859 canrestore));
3860 break;
3861 case 12: // cleanwin
3862 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3863 offsetof(CPUSPARCState,
3864 cleanwin));
3865 break;
3866 case 13: // otherwin
3867 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3868 offsetof(CPUSPARCState,
3869 otherwin));
3870 break;
3871 case 14: // wstate
3872 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3873 offsetof(CPUSPARCState,
3874 wstate));
3875 break;
3876 case 16: // UA2005 gl
3877 CHECK_IU_FEATURE(dc, GL);
3878 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3879 offsetof(CPUSPARCState, gl));
3880 break;
3881 case 26: // UA2005 strand status
3882 CHECK_IU_FEATURE(dc, HYPV);
3883 if (!hypervisor(dc))
3884 goto priv_insn;
3885 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3886 break;
3887 default:
3888 goto illegal_insn;
3890 #else
3891 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3892 if (dc->def->nwindows != 32) {
3893 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3894 (1 << dc->def->nwindows) - 1);
3896 #endif
3898 break;
3899 case 0x33: /* wrtbr, UA2005 wrhpr */
3901 #ifndef TARGET_SPARC64
3902 if (!supervisor(dc))
3903 goto priv_insn;
3904 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3905 #else
3906 CHECK_IU_FEATURE(dc, HYPV);
3907 if (!hypervisor(dc))
3908 goto priv_insn;
3909 cpu_tmp0 = get_temp_tl(dc);
3910 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3911 switch (rd) {
3912 case 0: // hpstate
3913 // XXX gen_op_wrhpstate();
3914 save_state(dc);
3915 gen_op_next_insn();
3916 tcg_gen_exit_tb(0);
3917 dc->is_br = 1;
3918 break;
3919 case 1: // htstate
3920 // XXX gen_op_wrhtstate();
3921 break;
3922 case 3: // hintp
3923 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3924 break;
3925 case 5: // htba
3926 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3927 break;
3928 case 31: // hstick_cmpr
3930 TCGv_ptr r_tickptr;
3932 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3933 r_tickptr = tcg_temp_new_ptr();
3934 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3935 offsetof(CPUSPARCState, hstick));
3936 gen_helper_tick_set_limit(r_tickptr,
3937 cpu_hstick_cmpr);
3938 tcg_temp_free_ptr(r_tickptr);
3940 break;
3941 case 6: // hver readonly
3942 default:
3943 goto illegal_insn;
3945 #endif
3947 break;
3948 #endif
3949 #ifdef TARGET_SPARC64
3950 case 0x2c: /* V9 movcc */
3952 int cc = GET_FIELD_SP(insn, 11, 12);
3953 int cond = GET_FIELD_SP(insn, 14, 17);
3954 DisasCompare cmp;
3955 TCGv dst;
3957 if (insn & (1 << 18)) {
3958 if (cc == 0) {
3959 gen_compare(&cmp, 0, cond, dc);
3960 } else if (cc == 2) {
3961 gen_compare(&cmp, 1, cond, dc);
3962 } else {
3963 goto illegal_insn;
3965 } else {
3966 gen_fcompare(&cmp, cc, cond);
3969 /* The get_src2 above loaded the normal 13-bit
3970 immediate field, not the 11-bit field we have
3971 in movcc. But it did handle the reg case. */
3972 if (IS_IMM) {
3973 simm = GET_FIELD_SPs(insn, 0, 10);
3974 tcg_gen_movi_tl(cpu_src2, simm);
3977 dst = gen_load_gpr(dc, rd);
3978 tcg_gen_movcond_tl(cmp.cond, dst,
3979 cmp.c1, cmp.c2,
3980 cpu_src2, dst);
3981 free_compare(&cmp);
3982 gen_store_gpr(dc, rd, dst);
3983 break;
3985 case 0x2d: /* V9 sdivx */
3986 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3987 gen_store_gpr(dc, rd, cpu_dst);
3988 break;
3989 case 0x2e: /* V9 popc */
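/* Only rs2 is used; the V9 spec reserves the rs1 field (it must be
   zero), which is not checked here. */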
3990 gen_helper_popc(cpu_dst, cpu_src2);
3991 gen_store_gpr(dc, rd, cpu_dst);
3992 break;
3993 case 0x2f: /* V9 movr */
3995 int cond = GET_FIELD_SP(insn, 10, 12);
3996 DisasCompare cmp;
3997 TCGv dst;
3999 gen_compare_reg(&cmp, cond, cpu_src1);
4001 /* The get_src2 above loaded the normal 13-bit
4002 immediate field, not the 10-bit field we have
4003 in movr. But it did handle the reg case. */
4004 if (IS_IMM) {
4005 simm = GET_FIELD_SPs(insn, 0, 9);
4006 tcg_gen_movi_tl(cpu_src2, simm);
4009 dst = gen_load_gpr(dc, rd);
4010 tcg_gen_movcond_tl(cmp.cond, dst,
4011 cmp.c1, cmp.c2,
4012 cpu_src2, dst);
4013 free_compare(&cmp);
4014 gen_store_gpr(dc, rd, dst);
4015 break;
4017 #endif
4018 default:
4019 goto illegal_insn;
4022 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4023 #ifdef TARGET_SPARC64
4024 int opf = GET_FIELD_SP(insn, 5, 13);
4025 rs1 = GET_FIELD(insn, 13, 17);
4026 rs2 = GET_FIELD(insn, 27, 31);
4027 if (gen_trap_ifnofpu(dc)) {
4028 goto jmp_insn;
4031 switch (opf) {
4032 case 0x000: /* VIS I edge8cc */
4033 CHECK_FPU_FEATURE(dc, VIS1);
4034 cpu_src1 = gen_load_gpr(dc, rs1);
4035 cpu_src2 = gen_load_gpr(dc, rs2);
4036 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4037 gen_store_gpr(dc, rd, cpu_dst);
4038 break;
4039 case 0x001: /* VIS II edge8n */
4040 CHECK_FPU_FEATURE(dc, VIS2);
4041 cpu_src1 = gen_load_gpr(dc, rs1);
4042 cpu_src2 = gen_load_gpr(dc, rs2);
4043 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4044 gen_store_gpr(dc, rd, cpu_dst);
4045 break;
4046 case 0x002: /* VIS I edge8lcc */
4047 CHECK_FPU_FEATURE(dc, VIS1);
4048 cpu_src1 = gen_load_gpr(dc, rs1);
4049 cpu_src2 = gen_load_gpr(dc, rs2);
4050 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4051 gen_store_gpr(dc, rd, cpu_dst);
4052 break;
4053 case 0x003: /* VIS II edge8ln */
4054 CHECK_FPU_FEATURE(dc, VIS2);
4055 cpu_src1 = gen_load_gpr(dc, rs1);
4056 cpu_src2 = gen_load_gpr(dc, rs2);
4057 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4058 gen_store_gpr(dc, rd, cpu_dst);
4059 break;
4060 case 0x004: /* VIS I edge16cc */
4061 CHECK_FPU_FEATURE(dc, VIS1);
4062 cpu_src1 = gen_load_gpr(dc, rs1);
4063 cpu_src2 = gen_load_gpr(dc, rs2);
4064 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4065 gen_store_gpr(dc, rd, cpu_dst);
4066 break;
4067 case 0x005: /* VIS II edge16n */
4068 CHECK_FPU_FEATURE(dc, VIS2);
4069 cpu_src1 = gen_load_gpr(dc, rs1);
4070 cpu_src2 = gen_load_gpr(dc, rs2);
4071 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4072 gen_store_gpr(dc, rd, cpu_dst);
4073 break;
4074 case 0x006: /* VIS I edge16lcc */
4075 CHECK_FPU_FEATURE(dc, VIS1);
4076 cpu_src1 = gen_load_gpr(dc, rs1);
4077 cpu_src2 = gen_load_gpr(dc, rs2);
4078 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4079 gen_store_gpr(dc, rd, cpu_dst);
4080 break;
4081 case 0x007: /* VIS II edge16ln */
4082 CHECK_FPU_FEATURE(dc, VIS2);
4083 cpu_src1 = gen_load_gpr(dc, rs1);
4084 cpu_src2 = gen_load_gpr(dc, rs2);
4085 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4086 gen_store_gpr(dc, rd, cpu_dst);
4087 break;
4088 case 0x008: /* VIS I edge32cc */
4089 CHECK_FPU_FEATURE(dc, VIS1);
4090 cpu_src1 = gen_load_gpr(dc, rs1);
4091 cpu_src2 = gen_load_gpr(dc, rs2);
4092 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4093 gen_store_gpr(dc, rd, cpu_dst);
4094 break;
4095 case 0x009: /* VIS II edge32n */
4096 CHECK_FPU_FEATURE(dc, VIS2);
4097 cpu_src1 = gen_load_gpr(dc, rs1);
4098 cpu_src2 = gen_load_gpr(dc, rs2);
4099 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4100 gen_store_gpr(dc, rd, cpu_dst);
4101 break;
4102 case 0x00a: /* VIS I edge32lcc */
4103 CHECK_FPU_FEATURE(dc, VIS1);
4104 cpu_src1 = gen_load_gpr(dc, rs1);
4105 cpu_src2 = gen_load_gpr(dc, rs2);
4106 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4107 gen_store_gpr(dc, rd, cpu_dst);
4108 break;
4109 case 0x00b: /* VIS II edge32ln */
4110 CHECK_FPU_FEATURE(dc, VIS2);
4111 cpu_src1 = gen_load_gpr(dc, rs1);
4112 cpu_src2 = gen_load_gpr(dc, rs2);
4113 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4114 gen_store_gpr(dc, rd, cpu_dst);
4115 break;
4116 case 0x010: /* VIS I array8 */
4117 CHECK_FPU_FEATURE(dc, VIS1);
4118 cpu_src1 = gen_load_gpr(dc, rs1);
4119 cpu_src2 = gen_load_gpr(dc, rs2);
4120 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4121 gen_store_gpr(dc, rd, cpu_dst);
4122 break;
4123 case 0x012: /* VIS I array16 */
4124 CHECK_FPU_FEATURE(dc, VIS1);
4125 cpu_src1 = gen_load_gpr(dc, rs1);
4126 cpu_src2 = gen_load_gpr(dc, rs2);
4127 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4128 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4129 gen_store_gpr(dc, rd, cpu_dst);
4130 break;
4131 case 0x014: /* VIS I array32 */
4132 CHECK_FPU_FEATURE(dc, VIS1);
4133 cpu_src1 = gen_load_gpr(dc, rs1);
4134 cpu_src2 = gen_load_gpr(dc, rs2);
4135 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4136 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4137 gen_store_gpr(dc, rd, cpu_dst);
4138 break;
4139 case 0x018: /* VIS I alignaddr */
4140 CHECK_FPU_FEATURE(dc, VIS1);
4141 cpu_src1 = gen_load_gpr(dc, rs1);
4142 cpu_src2 = gen_load_gpr(dc, rs2);
4143 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4144 gen_store_gpr(dc, rd, cpu_dst);
4145 break;
4146 case 0x01a: /* VIS I alignaddrl */
4147 CHECK_FPU_FEATURE(dc, VIS1);
4148 cpu_src1 = gen_load_gpr(dc, rs1);
4149 cpu_src2 = gen_load_gpr(dc, rs2);
4150 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4151 gen_store_gpr(dc, rd, cpu_dst);
4152 break;
4153 case 0x019: /* VIS II bmask */
4154 CHECK_FPU_FEATURE(dc, VIS2);
4155 cpu_src1 = gen_load_gpr(dc, rs1);
4156 cpu_src2 = gen_load_gpr(dc, rs2);
4157 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
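/* The low 32 bits of the sum also become GSR.mask (bits 63:32),
   consumed later by bshuffle. */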
4158 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4159 gen_store_gpr(dc, rd, cpu_dst);
4160 break;
4161 case 0x020: /* VIS I fcmple16 */
4162 CHECK_FPU_FEATURE(dc, VIS1);
4163 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4164 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4165 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4166 gen_store_gpr(dc, rd, cpu_dst);
4167 break;
4168 case 0x022: /* VIS I fcmpne16 */
4169 CHECK_FPU_FEATURE(dc, VIS1);
4170 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4171 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4172 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4173 gen_store_gpr(dc, rd, cpu_dst);
4174 break;
4175 case 0x024: /* VIS I fcmple32 */
4176 CHECK_FPU_FEATURE(dc, VIS1);
4177 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4178 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4179 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4180 gen_store_gpr(dc, rd, cpu_dst);
4181 break;
4182 case 0x026: /* VIS I fcmpne32 */
4183 CHECK_FPU_FEATURE(dc, VIS1);
4184 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4185 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4186 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4187 gen_store_gpr(dc, rd, cpu_dst);
4188 break;
4189 case 0x028: /* VIS I fcmpgt16 */
4190 CHECK_FPU_FEATURE(dc, VIS1);
4191 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4192 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4193 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4194 gen_store_gpr(dc, rd, cpu_dst);
4195 break;
4196 case 0x02a: /* VIS I fcmpeq16 */
4197 CHECK_FPU_FEATURE(dc, VIS1);
4198 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4199 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4200 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4201 gen_store_gpr(dc, rd, cpu_dst);
4202 break;
4203 case 0x02c: /* VIS I fcmpgt32 */
4204 CHECK_FPU_FEATURE(dc, VIS1);
4205 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4206 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4207 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4208 gen_store_gpr(dc, rd, cpu_dst);
4209 break;
4210 case 0x02e: /* VIS I fcmpeq32 */
4211 CHECK_FPU_FEATURE(dc, VIS1);
4212 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4213 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4214 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4215 gen_store_gpr(dc, rd, cpu_dst);
4216 break;
4217 case 0x031: /* VIS I fmul8x16 */
4218 CHECK_FPU_FEATURE(dc, VIS1);
4219 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4220 break;
4221 case 0x033: /* VIS I fmul8x16au */
4222 CHECK_FPU_FEATURE(dc, VIS1);
4223 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4224 break;
4225 case 0x035: /* VIS I fmul8x16al */
4226 CHECK_FPU_FEATURE(dc, VIS1);
4227 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4228 break;
4229 case 0x036: /* VIS I fmul8sux16 */
4230 CHECK_FPU_FEATURE(dc, VIS1);
4231 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4232 break;
4233 case 0x037: /* VIS I fmul8ulx16 */
4234 CHECK_FPU_FEATURE(dc, VIS1);
4235 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4236 break;
4237 case 0x038: /* VIS I fmuld8sux16 */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4240 break;
4241 case 0x039: /* VIS I fmuld8ulx16 */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4244 break;
4245 case 0x03a: /* VIS I fpack32 */
4246 CHECK_FPU_FEATURE(dc, VIS1);
4247 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4248 break;
4249 case 0x03b: /* VIS I fpack16 */
4250 CHECK_FPU_FEATURE(dc, VIS1);
4251 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4252 cpu_dst_32 = gen_dest_fpr_F(dc);
4253 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4254 gen_store_fpr_F(dc, rd, cpu_dst_32);
4255 break;
4256 case 0x03d: /* VIS I fpackfix */
4257 CHECK_FPU_FEATURE(dc, VIS1);
4258 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4259 cpu_dst_32 = gen_dest_fpr_F(dc);
4260 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4261 gen_store_fpr_F(dc, rd, cpu_dst_32);
4262 break;
4263 case 0x03e: /* VIS I pdist */
4264 CHECK_FPU_FEATURE(dc, VIS1);
4265 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4266 break;
4267 case 0x048: /* VIS I faligndata */
4268 CHECK_FPU_FEATURE(dc, VIS1);
4269 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4270 break;
4271 case 0x04b: /* VIS I fpmerge */
4272 CHECK_FPU_FEATURE(dc, VIS1);
4273 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4274 break;
4275 case 0x04c: /* VIS II bshuffle */
4276 CHECK_FPU_FEATURE(dc, VIS2);
4277 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4278 break;
4279 case 0x04d: /* VIS I fexpand */
4280 CHECK_FPU_FEATURE(dc, VIS1);
4281 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4282 break;
4283 case 0x050: /* VIS I fpadd16 */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4286 break;
4287 case 0x051: /* VIS I fpadd16s */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4290 break;
4291 case 0x052: /* VIS I fpadd32 */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4294 break;
4295 case 0x053: /* VIS I fpadd32s */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4298 break;
4299 case 0x054: /* VIS I fpsub16 */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4302 break;
4303 case 0x055: /* VIS I fpsub16s */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4306 break;
4307 case 0x056: /* VIS I fpsub32 */
4308 CHECK_FPU_FEATURE(dc, VIS1);
4309 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4310 break;
4311 case 0x057: /* VIS I fpsub32s */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4314 break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
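            /* V9 'return' is effectively 'restore' plus a register-indirect
               jump: the register window is popped, then control transfers
               to rs1 + rs2 (or rs1 + simm13) after a 4-byte alignment
               check on the target.  */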
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                TCGv_i32 r_const;

                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_helper_restore(cpu_env);
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
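                /* The control transfers below are delayed: gen_mov_pc_npc()
                   makes the delay slot (the old npc) execute next, while
                   the computed target address goes into npc.  */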
                case 0x38:      /* jmpl */
                    {
                        TCGv t;
                        TCGv_i32 r_const;

                        t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39:      /* rett, V9 return */
                    {
                        TCGv_i32 r_const;

                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!(dc->def->features & CPU_FEATURE_FLUSH))
                        goto unimp_flush;
                    /* nop */
                    break;
                case 0x3c:      /* save */
                    save_state(dc);
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d:      /* restore */
                    save_state(dc);
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e:      /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
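    /* Loads and stores.  The effective address is always rs1 plus either
       a 13-bit signed immediate or rs2; casa/casxa take no offset.  */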
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {    /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
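                /* ldd requires an even rd.  On this big-endian target the
                   word at the lower address is the high half of the 64-bit
                   load, so it is extracted last and reaches rd via the
                   common gen_store_gpr() after the switch; the word at
                   addr+4 goes to rd|1 here.  */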
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
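                /* ldstub and swap are architecturally atomic read-modify-
                   write operations, but this translator emits a plain load
                   followed by a store (see the XXX notes), which is only
                   safe for a single-threaded guest.  */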
                case 0xd:       /* ldstub -- XXX: should be atomically */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        /* ldstub loads an unsigned byte, so zero-extend */
                        tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory.  Also atomically */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
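                /* Alternate-space loads: on sparc32 only the register form
                   exists and it is supervisor-only; on sparc64 the ASI may
                   come from the instruction or from the %asi register, so
                   no translate-time privilege check is done here.  */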
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:      /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory. Also
                                   atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;
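                /* FP loads: gen_trap_ifnofpu() raises the fp-disabled trap
                   when the FPU is off and tells us to stop translating
                   this instruction.  */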
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);
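                /* Integer stores: the value to store is read from rd up
                   front; std additionally reads rd|1 below.  */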
                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;
                        TCGv lo;

                        save_state(dc);
                        gen_address_mask(dc, cpu_addr);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        lo = gen_load_gpr(dc, rd + 1);

                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 4);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 1);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 2);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        save_state(dc);
                        gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    }
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 8);
                    dc->npc = DYNAMIC_PC;
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
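                /* FP stores: stf/stfsr/stdf plus the sparc32-only stdfq;
                   all of them first check for a disabled FPU.  */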
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    {
                        TCGv t = get_temp_tl(dc);
                        gen_address_mask(dc, cpu_addr);
                        cpu_src1_32 = gen_load_fpr_F(dc, rd);
                        tcg_gen_ext_i32_tl(t, cpu_src1_32);
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
                        TCGv t = get_temp_tl(dc);

                        tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rd));
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_stqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                save_state(dc);
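                /* Alternate-space FP stores and compare-and-swap: all of
                   these may fault, hence the save_state() above.  */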
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        r_const = tcg_const_i32(7);
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    /* LEON3 allows CASA from user space with ASI 0xa */
                    if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
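    /* Common trap exits: flush pc/npc into the CPU state, raise the
       exception and mark the translation block as ended.  */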
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
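
/* Translate a block of guest code starting at tb->pc: instructions are
   decoded one at a time until a branch ends the block or the page/TB
   limits are reached, then the TB epilogue is emitted.  */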
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env, false);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
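    /* Record (pc, npc) for every insn so restore_state_to_opc() can
       recover the exact control-flow state at a faulting instruction;
       a pending conditional delayed branch is encoded as
       jump_pc[0] | JUMP_PC.  */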
    do {
        if (dc->npc & JUMP_PC) {
            assert(dc->jump_pc[1] == dc->pc + 4);
            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
        } else {
            tcg_gen_insn_start(dc->pc, dc->npc);
        }
        num_insns++;
        last_pc = dc->pc;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
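
/* Create the TCG globals that mirror the CPUSPARCState fields; this runs
   only once, guarded by the 'inited' flag below.  */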
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
                                         "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }
    }
}
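
/* Called when an exception is raised mid-TB: data[] holds the (pc, npc)
   pair that tcg_gen_insn_start recorded for the faulting instruction.  */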
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;