target-sparc/translate.c (qemu/ar7.git)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This macro uses non-native bit order: bit 0 is the MSB of the word
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
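
/* For example, GET_FIELD(insn, 2, 6) extracts the five bits that are
   29..25 in conventional LSB-is-bit-0 numbering (the rd field), since
   FROM/TO here count down from the MSB; GET_FIELD_SP(insn, 25, 29)
   names the same field the way the architecture manuals do. */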

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
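/* On SPARC64, bit 5 of a double/quad register number is encoded in bit 0
   of the instruction field, so e.g. DFPREG(1) maps to register 32
   (%f32) while DFPREG(2) stays register 2 (%f2). */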

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
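/* e.g. sign_extend(0x1fff, 13) == -1: a 13-bit all-ones immediate
   (simm13) is shifted up against bit 31 and arithmetically shifted
   back down. */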

#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
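/* FPRS.DL (bit 0) marks the lower register bank %f0-%f31 dirty and
   FPRS.DU (bit 1) the upper bank %f32-%f63; rd is the expanded
   register number, so rd < 32 selects the lower bank. */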

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
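/* The non-zero argument to tcg_gen_exit_tb() hands the TB pointer plus
   the jump-slot index back to the execution loop, which uses it to
   patch (chain) the direct jump to the next TB; exit_tb(0) forgoes
   chaining. */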

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
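/* Unsigned wraparound is the carry: e.g. 0xffffffff + 1 == 0, and
   0 < 1 (LTU), so comparing the truncated result against one source
   operand recovers the carry bit of the earlier add. */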

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
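/* MULScc is one shift-and-add step of the V8 iterative 32x32 multiply:
   software executes it 32 times (plus a final fixup add) to build the
   full product across %y and the destination register. */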

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
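/* Both flavours leave the high 32 bits of the 64-bit product in %y, as
   V8 UMUL/SMUL require; on a 64-bit target the whole product is also
   kept in dst. */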

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };
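
    /* After a logic op the carry and overflow flags are clear and N/Z
       mirror the result, so every condition reduces to a signed compare
       of cc_dst against zero, per the right-hand comments above. */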

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }
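
    /* The offsets are relative to FSR_FCC0_SHIFT: fcc0 sits at FSR bits
       11:10 while fcc1, fcc2 and fcc3 sit at bits 33:32, 35:34 and
       37:36, hence the 32 - 10, 34 - 10 and 36 - 10 rebasing. */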
    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,              /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,              /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif

static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign); /* r_sign is a TCGv_i32, so use the i32 free */
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif

static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}

#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}

#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
2320 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2321 int width, bool cc, bool left)
2323 TCGv lo1, lo2, t1, t2;
2324 uint64_t amask, tabl, tabr;
2325 int shift, imask, omask;
2327 if (cc) {
2328 tcg_gen_mov_tl(cpu_cc_src, s1);
2329 tcg_gen_mov_tl(cpu_cc_src2, s2);
2330 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2331 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2332 dc->cc_op = CC_OP_SUB;
2335 /* Theory of operation: there are two tables, left and right (not to
2336 be confused with the left and right versions of the opcode). These
2337 are indexed by the low 3 bits of the inputs. To make things "easy",
2338 these tables are loaded into two constants, TABL and TABR below.
2339 The operation index = (input & imask) << shift calculates the index
2340 into the constant, while val = (table >> index) & omask calculates
2341 the value we're looking for. */
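/* Illustrative arithmetic only: for width 8 with the left variant, an
   input whose low three bits are 2 gives index = 2 << 3 = 16, so
   val = (0x80c0e0f0f8fcfeffULL >> 16) & 0xff = 0xfc. */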
2342 switch (width) {
2343 case 8:
2344 imask = 0x7;
2345 shift = 3;
2346 omask = 0xff;
2347 if (left) {
2348 tabl = 0x80c0e0f0f8fcfeffULL;
2349 tabr = 0xff7f3f1f0f070301ULL;
2350 } else {
2351 tabl = 0x0103070f1f3f7fffULL;
2352 tabr = 0xfffefcf8f0e0c080ULL;
2353 }
2354 break;
2355 case 16:
2356 imask = 0x6;
2357 shift = 1;
2358 omask = 0xf;
2359 if (left) {
2360 tabl = 0x8cef;
2361 tabr = 0xf731;
2362 } else {
2363 tabl = 0x137f;
2364 tabr = 0xfec8;
2365 }
2366 break;
2367 case 32:
2368 imask = 0x4;
2369 shift = 0;
2370 omask = 0x3;
2371 if (left) {
2372 tabl = (2 << 2) | 3;
2373 tabr = (3 << 2) | 1;
2374 } else {
2375 tabl = (1 << 2) | 3;
2376 tabr = (3 << 2) | 2;
2377 }
2378 break;
2379 default:
2380 abort();
2381 }
2383 lo1 = tcg_temp_new();
2384 lo2 = tcg_temp_new();
2385 tcg_gen_andi_tl(lo1, s1, imask);
2386 tcg_gen_andi_tl(lo2, s2, imask);
2387 tcg_gen_shli_tl(lo1, lo1, shift);
2388 tcg_gen_shli_tl(lo2, lo2, shift);
2390 t1 = tcg_const_tl(tabl);
2391 t2 = tcg_const_tl(tabr);
2392 tcg_gen_shr_tl(lo1, t1, lo1);
2393 tcg_gen_shr_tl(lo2, t2, lo2);
2394 tcg_gen_andi_tl(dst, lo1, omask);
2395 tcg_gen_andi_tl(lo2, lo2, omask);
2397 amask = -8;
2398 if (AM_CHECK(dc)) {
2399 amask &= 0xffffffffULL;
2401 tcg_gen_andi_tl(s1, s1, amask);
2402 tcg_gen_andi_tl(s2, s2, amask);
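/* s1 and s2 now hold the 8-byte-aligned addresses, additionally
   truncated to 32 bits when the address mask applies, ready for the
   equality test below. */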
2404 /* We want to compute
2405 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2406 We've already done dst = lo1, so this reduces to
2407 dst &= (s1 == s2 ? -1 : lo2)
2408 Which we perform by
2409 lo2 |= -(s1 == s2)
2410 dst &= lo2
2411 */
2412 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2413 tcg_gen_neg_tl(t1, t1);
2414 tcg_gen_or_tl(lo2, lo2, t1);
2415 tcg_gen_and_tl(dst, dst, lo2);
2417 tcg_temp_free(lo1);
2418 tcg_temp_free(lo2);
2419 tcg_temp_free(t1);
2420 tcg_temp_free(t2);
2421 }
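/* alignaddr/alignaddrl: dst = (s1 + s2) & ~7, with the low three bits
   of the sum (negated for the "little" variant) deposited into
   GSR.align for use by a subsequent faligndata. */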
2423 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2424 {
2425 TCGv tmp = tcg_temp_new();
2427 tcg_gen_add_tl(tmp, s1, s2);
2428 tcg_gen_andi_tl(dst, tmp, -8);
2429 if (left) {
2430 tcg_gen_neg_tl(tmp, tmp);
2431 }
2432 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2434 tcg_temp_free(tmp);
2435 }
2437 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2438 {
2439 TCGv t1, t2, shift;
2441 t1 = tcg_temp_new();
2442 t2 = tcg_temp_new();
2443 shift = tcg_temp_new();
2445 tcg_gen_andi_tl(shift, gsr, 7);
2446 tcg_gen_shli_tl(shift, shift, 3);
2447 tcg_gen_shl_tl(t1, s1, shift);
2449 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2450 shift of (up to 63) followed by a constant shift of 1. */
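/* Illustrative case: GSR.align == 3 gives shift == 24, so
   dst = (s1 << 24) | ((s2 >> 39) >> 1), i.e. bytes 3..7 of s1
   followed by bytes 0..2 of s2, byte 0 being most significant. */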
2451 tcg_gen_xori_tl(shift, shift, 63);
2452 tcg_gen_shr_tl(t2, s2, shift);
2453 tcg_gen_shri_tl(t2, t2, 1);
2455 tcg_gen_or_tl(dst, t1, t2);
2457 tcg_temp_free(t1);
2458 tcg_temp_free(t2);
2459 tcg_temp_free(shift);
2460 }
2461 #endif
2463 #define CHECK_IU_FEATURE(dc, FEATURE) \
2464 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2465 goto illegal_insn;
2466 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2467 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2468 goto nfpu_insn;
2470 /* before an instruction, dc->pc must be static */
2471 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2473 unsigned int opc, rs1, rs2, rd;
2474 TCGv cpu_src1, cpu_src2;
2475 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2476 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2477 target_long simm;
2479 opc = GET_FIELD(insn, 0, 1);
2480 rd = GET_FIELD(insn, 2, 6);
2482 switch (opc) {
2483 case 0: /* branches/sethi */
2485 unsigned int xop = GET_FIELD(insn, 7, 9);
2486 int32_t target;
2487 switch (xop) {
2488 #ifdef TARGET_SPARC64
2489 case 0x1: /* V9 BPcc */
2491 int cc;
2493 target = GET_FIELD_SP(insn, 0, 18);
2494 target = sign_extend(target, 19);
2495 target <<= 2;
2496 cc = GET_FIELD_SP(insn, 20, 21);
2497 if (cc == 0)
2498 do_branch(dc, target, insn, 0);
2499 else if (cc == 2)
2500 do_branch(dc, target, insn, 1);
2501 else
2502 goto illegal_insn;
2503 goto jmp_insn;
2505 case 0x3: /* V9 BPr */
2507 target = GET_FIELD_SP(insn, 0, 13) |
2508 (GET_FIELD_SP(insn, 20, 21) << 14);
2509 target = sign_extend(target, 16);
2510 target <<= 2;
2511 cpu_src1 = get_src1(dc, insn);
2512 do_branch_reg(dc, target, insn, cpu_src1);
2513 goto jmp_insn;
2515 case 0x5: /* V9 FBPcc */
2517 int cc = GET_FIELD_SP(insn, 20, 21);
2518 if (gen_trap_ifnofpu(dc)) {
2519 goto jmp_insn;
2521 target = GET_FIELD_SP(insn, 0, 18);
2522 target = sign_extend(target, 19);
2523 target <<= 2;
2524 do_fbranch(dc, target, insn, cc);
2525 goto jmp_insn;
2527 #else
2528 case 0x7: /* CBN+x */
2530 goto ncp_insn;
2532 #endif
2533 case 0x2: /* BN+x */
2535 target = GET_FIELD(insn, 10, 31);
2536 target = sign_extend(target, 22);
2537 target <<= 2;
2538 do_branch(dc, target, insn, 0);
2539 goto jmp_insn;
2541 case 0x6: /* FBN+x */
2543 if (gen_trap_ifnofpu(dc)) {
2544 goto jmp_insn;
2546 target = GET_FIELD(insn, 10, 31);
2547 target = sign_extend(target, 22);
2548 target <<= 2;
2549 do_fbranch(dc, target, insn, 0);
2550 goto jmp_insn;
2552 case 0x4: /* SETHI */
2553 /* Special-case %g0 because that's the canonical nop. */
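/* ("sethi 0, %g0", encoding 0x01000000, is the architected nop.) */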
2554 if (rd) {
2555 uint32_t value = GET_FIELD(insn, 10, 31);
2556 TCGv t = gen_dest_gpr(dc, rd);
2557 tcg_gen_movi_tl(t, value << 10);
2558 gen_store_gpr(dc, rd, t);
2560 break;
2561 case 0x0: /* UNIMPL */
2562 default:
2563 goto illegal_insn;
2565 break;
2567 break;
2568 case 1: /*CALL*/
2570 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2571 TCGv o7 = gen_dest_gpr(dc, 15);
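/* CALL stores the address of the call itself (PC, not NPC) in %o7. */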
2573 tcg_gen_movi_tl(o7, dc->pc);
2574 gen_store_gpr(dc, 15, o7);
2575 target += dc->pc;
2576 gen_mov_pc_npc(dc);
2577 #ifdef TARGET_SPARC64
2578 if (unlikely(AM_CHECK(dc))) {
2579 target &= 0xffffffffULL;
2581 #endif
2582 dc->npc = target;
2584 goto jmp_insn;
2585 case 2: /* FPU & Logical Operations */
2587 unsigned int xop = GET_FIELD(insn, 7, 12);
2588 TCGv cpu_dst = get_temp_tl(dc);
2589 TCGv cpu_tmp0;
2591 if (xop == 0x3a) { /* generate trap */
2592 int cond = GET_FIELD(insn, 3, 6);
2593 TCGv_i32 trap;
2594 TCGLabel *l1 = NULL;
2595 int mask;
2597 if (cond == 0) {
2598 /* Trap never. */
2599 break;
2602 save_state(dc);
2604 if (cond != 8) {
2605 /* Conditional trap. */
2606 DisasCompare cmp;
2607 #ifdef TARGET_SPARC64
2608 /* V9 icc/xcc */
2609 int cc = GET_FIELD_SP(insn, 11, 12);
2610 if (cc == 0) {
2611 gen_compare(&cmp, 0, cond, dc);
2612 } else if (cc == 2) {
2613 gen_compare(&cmp, 1, cond, dc);
2614 } else {
2615 goto illegal_insn;
2617 #else
2618 gen_compare(&cmp, 0, cond, dc);
2619 #endif
2620 l1 = gen_new_label();
2621 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2622 cmp.c1, cmp.c2, l1);
2623 free_compare(&cmp);
2626 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2627 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2629 /* Don't use the normal temporaries, as they may well have
2630 gone out of scope with the branch above. While we're
2631 doing that we might as well pre-truncate to 32-bit. */
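/* (The trap number is at most 8 bits wide -- see the masks above --
   so 32-bit arithmetic suffices.) */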
2632 trap = tcg_temp_new_i32();
2634 rs1 = GET_FIELD_SP(insn, 14, 18);
2635 if (IS_IMM) {
2636 rs2 = GET_FIELD_SP(insn, 0, 6);
2637 if (rs1 == 0) {
2638 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2639 /* Signal that the trap value is fully constant. */
2640 mask = 0;
2641 } else {
2642 TCGv t1 = gen_load_gpr(dc, rs1);
2643 tcg_gen_trunc_tl_i32(trap, t1);
2644 tcg_gen_addi_i32(trap, trap, rs2);
2646 } else {
2647 TCGv t1, t2;
2648 rs2 = GET_FIELD_SP(insn, 0, 4);
2649 t1 = gen_load_gpr(dc, rs1);
2650 t2 = gen_load_gpr(dc, rs2);
2651 tcg_gen_add_tl(t1, t1, t2);
2652 tcg_gen_trunc_tl_i32(trap, t1);
2654 if (mask != 0) {
2655 tcg_gen_andi_i32(trap, trap, mask);
2656 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2659 gen_helper_raise_exception(cpu_env, trap);
2660 tcg_temp_free_i32(trap);
2662 if (cond == 8) {
2663 /* An unconditional trap ends the TB. */
2664 dc->is_br = 1;
2665 goto jmp_insn;
2666 } else {
2667 /* A conditional trap falls through to the next insn. */
2668 gen_set_label(l1);
2669 break;
2671 } else if (xop == 0x28) {
2672 rs1 = GET_FIELD(insn, 13, 17);
2673 switch(rs1) {
2674 case 0: /* rdy */
2675 #ifndef TARGET_SPARC64
2676 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2677 manual, rdy on the microSPARC
2678 II */
2679 case 0x0f: /* stbar in the SPARCv8 manual,
2680 rdy on the microSPARC II */
2681 case 0x10 ... 0x1f: /* implementation-dependent in the
2682 SPARCv8 manual, rdy on the
2683 microSPARC II */
2684 /* Read Asr17 */
2685 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2686 TCGv t = gen_dest_gpr(dc, rd);
2687 /* Read Asr17 for a Leon3 monoprocessor */
2688 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2689 gen_store_gpr(dc, rd, t);
2690 break;
2692 #endif
2693 gen_store_gpr(dc, rd, cpu_y);
2694 break;
2695 #ifdef TARGET_SPARC64
2696 case 0x2: /* V9 rdccr */
2697 update_psr(dc);
2698 gen_helper_rdccr(cpu_dst, cpu_env);
2699 gen_store_gpr(dc, rd, cpu_dst);
2700 break;
2701 case 0x3: /* V9 rdasi */
2702 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2703 gen_store_gpr(dc, rd, cpu_dst);
2704 break;
2705 case 0x4: /* V9 rdtick */
2707 TCGv_ptr r_tickptr;
2708 TCGv_i32 r_const;
2710 r_tickptr = tcg_temp_new_ptr();
2711 r_const = tcg_const_i32(dc->mem_idx);
2712 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2713 offsetof(CPUSPARCState, tick));
2714 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2715 r_const);
2716 tcg_temp_free_ptr(r_tickptr);
2717 tcg_temp_free_i32(r_const);
2718 gen_store_gpr(dc, rd, cpu_dst);
2720 break;
2721 case 0x5: /* V9 rdpc */
2723 TCGv t = gen_dest_gpr(dc, rd);
2724 if (unlikely(AM_CHECK(dc))) {
2725 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2726 } else {
2727 tcg_gen_movi_tl(t, dc->pc);
2729 gen_store_gpr(dc, rd, t);
2731 break;
2732 case 0x6: /* V9 rdfprs */
2733 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2734 gen_store_gpr(dc, rd, cpu_dst);
2735 break;
2736 case 0xf: /* V9 membar */
2737 break; /* no effect */
2738 case 0x13: /* Graphics Status */
2739 if (gen_trap_ifnofpu(dc)) {
2740 goto jmp_insn;
2742 gen_store_gpr(dc, rd, cpu_gsr);
2743 break;
2744 case 0x16: /* Softint */
2745 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2746 gen_store_gpr(dc, rd, cpu_dst);
2747 break;
2748 case 0x17: /* Tick compare */
2749 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2750 break;
2751 case 0x18: /* System tick */
2753 TCGv_ptr r_tickptr;
2754 TCGv_i32 r_const;
2756 r_tickptr = tcg_temp_new_ptr();
2757 r_const = tcg_const_i32(dc->mem_idx);
2758 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2759 offsetof(CPUSPARCState, stick));
2760 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2761 r_const);
2762 tcg_temp_free_ptr(r_tickptr);
2763 tcg_temp_free_i32(r_const);
2764 gen_store_gpr(dc, rd, cpu_dst);
2766 break;
2767 case 0x19: /* System tick compare */
2768 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2769 break;
2770 case 0x10: /* Performance Control */
2771 case 0x11: /* Performance Instrumentation Counter */
2772 case 0x12: /* Dispatch Control */
2773 case 0x14: /* Softint set, WO */
2774 case 0x15: /* Softint clear, WO */
2775 #endif
2776 default:
2777 goto illegal_insn;
2779 #if !defined(CONFIG_USER_ONLY)
2780 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2781 #ifndef TARGET_SPARC64
2782 if (!supervisor(dc)) {
2783 goto priv_insn;
2785 update_psr(dc);
2786 gen_helper_rdpsr(cpu_dst, cpu_env);
2787 #else
2788 CHECK_IU_FEATURE(dc, HYPV);
2789 if (!hypervisor(dc))
2790 goto priv_insn;
2791 rs1 = GET_FIELD(insn, 13, 17);
2792 switch (rs1) {
2793 case 0: // hpstate
2794 // gen_op_rdhpstate();
2795 break;
2796 case 1: // htstate
2797 // gen_op_rdhtstate();
2798 break;
2799 case 3: // hintp
2800 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2801 break;
2802 case 5: // htba
2803 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2804 break;
2805 case 6: // hver
2806 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2807 break;
2808 case 31: // hstick_cmpr
2809 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2810 break;
2811 default:
2812 goto illegal_insn;
2814 #endif
2815 gen_store_gpr(dc, rd, cpu_dst);
2816 break;
2817 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2818 if (!supervisor(dc)) {
2819 goto priv_insn;
2821 cpu_tmp0 = get_temp_tl(dc);
2822 #ifdef TARGET_SPARC64
2823 rs1 = GET_FIELD(insn, 13, 17);
2824 switch (rs1) {
2825 case 0: // tpc
2827 TCGv_ptr r_tsptr;
2829 r_tsptr = tcg_temp_new_ptr();
2830 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2831 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2832 offsetof(trap_state, tpc));
2833 tcg_temp_free_ptr(r_tsptr);
2835 break;
2836 case 1: // tnpc
2838 TCGv_ptr r_tsptr;
2840 r_tsptr = tcg_temp_new_ptr();
2841 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2842 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2843 offsetof(trap_state, tnpc));
2844 tcg_temp_free_ptr(r_tsptr);
2846 break;
2847 case 2: // tstate
2849 TCGv_ptr r_tsptr;
2851 r_tsptr = tcg_temp_new_ptr();
2852 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2853 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2854 offsetof(trap_state, tstate));
2855 tcg_temp_free_ptr(r_tsptr);
2857 break;
2858 case 3: // tt
2860 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2862 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2863 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2864 offsetof(trap_state, tt));
2865 tcg_temp_free_ptr(r_tsptr);
2867 break;
2868 case 4: // tick
2870 TCGv_ptr r_tickptr;
2871 TCGv_i32 r_const;
2873 r_tickptr = tcg_temp_new_ptr();
2874 r_const = tcg_const_i32(dc->mem_idx);
2875 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2876 offsetof(CPUSPARCState, tick));
2877 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2878 r_tickptr, r_const);
2879 tcg_temp_free_ptr(r_tickptr);
2880 tcg_temp_free_i32(r_const);
2882 break;
2883 case 5: // tba
2884 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2885 break;
2886 case 6: // pstate
2887 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2888 offsetof(CPUSPARCState, pstate));
2889 break;
2890 case 7: // tl
2891 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2892 offsetof(CPUSPARCState, tl));
2893 break;
2894 case 8: // pil
2895 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2896 offsetof(CPUSPARCState, psrpil));
2897 break;
2898 case 9: // cwp
2899 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2900 break;
2901 case 10: // cansave
2902 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2903 offsetof(CPUSPARCState, cansave));
2904 break;
2905 case 11: // canrestore
2906 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2907 offsetof(CPUSPARCState, canrestore));
2908 break;
2909 case 12: // cleanwin
2910 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2911 offsetof(CPUSPARCState, cleanwin));
2912 break;
2913 case 13: // otherwin
2914 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2915 offsetof(CPUSPARCState, otherwin));
2916 break;
2917 case 14: // wstate
2918 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2919 offsetof(CPUSPARCState, wstate));
2920 break;
2921 case 16: // UA2005 gl
2922 CHECK_IU_FEATURE(dc, GL);
2923 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2924 offsetof(CPUSPARCState, gl));
2925 break;
2926 case 26: // UA2005 strand status
2927 CHECK_IU_FEATURE(dc, HYPV);
2928 if (!hypervisor(dc))
2929 goto priv_insn;
2930 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2931 break;
2932 case 31: // ver
2933 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2934 break;
2935 case 15: // fq
2936 default:
2937 goto illegal_insn;
2939 #else
2940 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2941 #endif
2942 gen_store_gpr(dc, rd, cpu_tmp0);
2943 break;
2944 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2945 #ifdef TARGET_SPARC64
2946 save_state(dc);
2947 gen_helper_flushw(cpu_env);
2948 #else
2949 if (!supervisor(dc))
2950 goto priv_insn;
2951 gen_store_gpr(dc, rd, cpu_tbr);
2952 #endif
2953 break;
2954 #endif
2955 } else if (xop == 0x34) { /* FPU Operations */
2956 if (gen_trap_ifnofpu(dc)) {
2957 goto jmp_insn;
2959 gen_op_clear_ieee_excp_and_FTT();
2960 rs1 = GET_FIELD(insn, 13, 17);
2961 rs2 = GET_FIELD(insn, 27, 31);
2962 xop = GET_FIELD(insn, 18, 26);
2963 save_state(dc);
2964 switch (xop) {
2965 case 0x1: /* fmovs */
2966 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2967 gen_store_fpr_F(dc, rd, cpu_src1_32);
2968 break;
2969 case 0x5: /* fnegs */
2970 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2971 break;
2972 case 0x9: /* fabss */
2973 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2974 break;
2975 case 0x29: /* fsqrts */
2976 CHECK_FPU_FEATURE(dc, FSQRT);
2977 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2978 break;
2979 case 0x2a: /* fsqrtd */
2980 CHECK_FPU_FEATURE(dc, FSQRT);
2981 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2982 break;
2983 case 0x2b: /* fsqrtq */
2984 CHECK_FPU_FEATURE(dc, FLOAT128);
2985 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2986 break;
2987 case 0x41: /* fadds */
2988 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2989 break;
2990 case 0x42: /* faddd */
2991 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2992 break;
2993 case 0x43: /* faddq */
2994 CHECK_FPU_FEATURE(dc, FLOAT128);
2995 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2996 break;
2997 case 0x45: /* fsubs */
2998 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2999 break;
3000 case 0x46: /* fsubd */
3001 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3002 break;
3003 case 0x47: /* fsubq */
3004 CHECK_FPU_FEATURE(dc, FLOAT128);
3005 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3006 break;
3007 case 0x49: /* fmuls */
3008 CHECK_FPU_FEATURE(dc, FMUL);
3009 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3010 break;
3011 case 0x4a: /* fmuld */
3012 CHECK_FPU_FEATURE(dc, FMUL);
3013 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3014 break;
3015 case 0x4b: /* fmulq */
3016 CHECK_FPU_FEATURE(dc, FLOAT128);
3017 CHECK_FPU_FEATURE(dc, FMUL);
3018 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3019 break;
3020 case 0x4d: /* fdivs */
3021 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3022 break;
3023 case 0x4e: /* fdivd */
3024 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3025 break;
3026 case 0x4f: /* fdivq */
3027 CHECK_FPU_FEATURE(dc, FLOAT128);
3028 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3029 break;
3030 case 0x69: /* fsmuld */
3031 CHECK_FPU_FEATURE(dc, FSMULD);
3032 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3033 break;
3034 case 0x6e: /* fdmulq */
3035 CHECK_FPU_FEATURE(dc, FLOAT128);
3036 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3037 break;
3038 case 0xc4: /* fitos */
3039 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3040 break;
3041 case 0xc6: /* fdtos */
3042 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3043 break;
3044 case 0xc7: /* fqtos */
3045 CHECK_FPU_FEATURE(dc, FLOAT128);
3046 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3047 break;
3048 case 0xc8: /* fitod */
3049 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3050 break;
3051 case 0xc9: /* fstod */
3052 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3053 break;
3054 case 0xcb: /* fqtod */
3055 CHECK_FPU_FEATURE(dc, FLOAT128);
3056 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3057 break;
3058 case 0xcc: /* fitoq */
3059 CHECK_FPU_FEATURE(dc, FLOAT128);
3060 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3061 break;
3062 case 0xcd: /* fstoq */
3063 CHECK_FPU_FEATURE(dc, FLOAT128);
3064 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3065 break;
3066 case 0xce: /* fdtoq */
3067 CHECK_FPU_FEATURE(dc, FLOAT128);
3068 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3069 break;
3070 case 0xd1: /* fstoi */
3071 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3072 break;
3073 case 0xd2: /* fdtoi */
3074 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3075 break;
3076 case 0xd3: /* fqtoi */
3077 CHECK_FPU_FEATURE(dc, FLOAT128);
3078 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3079 break;
3080 #ifdef TARGET_SPARC64
3081 case 0x2: /* V9 fmovd */
3082 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3083 gen_store_fpr_D(dc, rd, cpu_src1_64);
3084 break;
3085 case 0x3: /* V9 fmovq */
3086 CHECK_FPU_FEATURE(dc, FLOAT128);
3087 gen_move_Q(rd, rs2);
3088 break;
3089 case 0x6: /* V9 fnegd */
3090 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3091 break;
3092 case 0x7: /* V9 fnegq */
3093 CHECK_FPU_FEATURE(dc, FLOAT128);
3094 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3095 break;
3096 case 0xa: /* V9 fabsd */
3097 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3098 break;
3099 case 0xb: /* V9 fabsq */
3100 CHECK_FPU_FEATURE(dc, FLOAT128);
3101 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3102 break;
3103 case 0x81: /* V9 fstox */
3104 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3105 break;
3106 case 0x82: /* V9 fdtox */
3107 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3108 break;
3109 case 0x83: /* V9 fqtox */
3110 CHECK_FPU_FEATURE(dc, FLOAT128);
3111 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3112 break;
3113 case 0x84: /* V9 fxtos */
3114 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3115 break;
3116 case 0x88: /* V9 fxtod */
3117 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3118 break;
3119 case 0x8c: /* V9 fxtoq */
3120 CHECK_FPU_FEATURE(dc, FLOAT128);
3121 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3122 break;
3123 #endif
3124 default:
3125 goto illegal_insn;
3127 } else if (xop == 0x35) { /* FPU Operations */
3128 #ifdef TARGET_SPARC64
3129 int cond;
3130 #endif
3131 if (gen_trap_ifnofpu(dc)) {
3132 goto jmp_insn;
3134 gen_op_clear_ieee_excp_and_FTT();
3135 rs1 = GET_FIELD(insn, 13, 17);
3136 rs2 = GET_FIELD(insn, 27, 31);
3137 xop = GET_FIELD(insn, 18, 26);
3138 save_state(dc);
3140 #ifdef TARGET_SPARC64
3141 #define FMOVR(sz) \
3142 do { \
3143 DisasCompare cmp; \
3144 cond = GET_FIELD_SP(insn, 10, 12); \
3145 cpu_src1 = get_src1(dc, insn); \
3146 gen_compare_reg(&cmp, cond, cpu_src1); \
3147 gen_fmov##sz(dc, &cmp, rd, rs2); \
3148 free_compare(&cmp); \
3149 } while (0)
3151 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3152 FMOVR(s);
3153 break;
3154 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3155 FMOVR(d);
3156 break;
3157 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3158 CHECK_FPU_FEATURE(dc, FLOAT128);
3159 FMOVR(q);
3160 break;
3162 #undef FMOVR
3163 #endif
3164 switch (xop) {
3165 #ifdef TARGET_SPARC64
3166 #define FMOVCC(fcc, sz) \
3167 do { \
3168 DisasCompare cmp; \
3169 cond = GET_FIELD_SP(insn, 14, 17); \
3170 gen_fcompare(&cmp, fcc, cond); \
3171 gen_fmov##sz(dc, &cmp, rd, rs2); \
3172 free_compare(&cmp); \
3173 } while (0)
3175 case 0x001: /* V9 fmovscc %fcc0 */
3176 FMOVCC(0, s);
3177 break;
3178 case 0x002: /* V9 fmovdcc %fcc0 */
3179 FMOVCC(0, d);
3180 break;
3181 case 0x003: /* V9 fmovqcc %fcc0 */
3182 CHECK_FPU_FEATURE(dc, FLOAT128);
3183 FMOVCC(0, q);
3184 break;
3185 case 0x041: /* V9 fmovscc %fcc1 */
3186 FMOVCC(1, s);
3187 break;
3188 case 0x042: /* V9 fmovdcc %fcc1 */
3189 FMOVCC(1, d);
3190 break;
3191 case 0x043: /* V9 fmovqcc %fcc1 */
3192 CHECK_FPU_FEATURE(dc, FLOAT128);
3193 FMOVCC(1, q);
3194 break;
3195 case 0x081: /* V9 fmovscc %fcc2 */
3196 FMOVCC(2, s);
3197 break;
3198 case 0x082: /* V9 fmovdcc %fcc2 */
3199 FMOVCC(2, d);
3200 break;
3201 case 0x083: /* V9 fmovqcc %fcc2 */
3202 CHECK_FPU_FEATURE(dc, FLOAT128);
3203 FMOVCC(2, q);
3204 break;
3205 case 0x0c1: /* V9 fmovscc %fcc3 */
3206 FMOVCC(3, s);
3207 break;
3208 case 0x0c2: /* V9 fmovdcc %fcc3 */
3209 FMOVCC(3, d);
3210 break;
3211 case 0x0c3: /* V9 fmovqcc %fcc3 */
3212 CHECK_FPU_FEATURE(dc, FLOAT128);
3213 FMOVCC(3, q);
3214 break;
3215 #undef FMOVCC
3216 #define FMOVCC(xcc, sz) \
3217 do { \
3218 DisasCompare cmp; \
3219 cond = GET_FIELD_SP(insn, 14, 17); \
3220 gen_compare(&cmp, xcc, cond, dc); \
3221 gen_fmov##sz(dc, &cmp, rd, rs2); \
3222 free_compare(&cmp); \
3223 } while (0)
3225 case 0x101: /* V9 fmovscc %icc */
3226 FMOVCC(0, s);
3227 break;
3228 case 0x102: /* V9 fmovdcc %icc */
3229 FMOVCC(0, d);
3230 break;
3231 case 0x103: /* V9 fmovqcc %icc */
3232 CHECK_FPU_FEATURE(dc, FLOAT128);
3233 FMOVCC(0, q);
3234 break;
3235 case 0x181: /* V9 fmovscc %xcc */
3236 FMOVCC(1, s);
3237 break;
3238 case 0x182: /* V9 fmovdcc %xcc */
3239 FMOVCC(1, d);
3240 break;
3241 case 0x183: /* V9 fmovqcc %xcc */
3242 CHECK_FPU_FEATURE(dc, FLOAT128);
3243 FMOVCC(1, q);
3244 break;
3245 #undef FMOVCC
3246 #endif
3247 case 0x51: /* fcmps, V9 %fcc */
3248 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3249 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3250 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3251 break;
3252 case 0x52: /* fcmpd, V9 %fcc */
3253 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3254 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3255 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3256 break;
3257 case 0x53: /* fcmpq, V9 %fcc */
3258 CHECK_FPU_FEATURE(dc, FLOAT128);
3259 gen_op_load_fpr_QT0(QFPREG(rs1));
3260 gen_op_load_fpr_QT1(QFPREG(rs2));
3261 gen_op_fcmpq(rd & 3);
3262 break;
3263 case 0x55: /* fcmpes, V9 %fcc */
3264 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3265 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3266 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3267 break;
3268 case 0x56: /* fcmped, V9 %fcc */
3269 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3270 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3271 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3272 break;
3273 case 0x57: /* fcmpeq, V9 %fcc */
3274 CHECK_FPU_FEATURE(dc, FLOAT128);
3275 gen_op_load_fpr_QT0(QFPREG(rs1));
3276 gen_op_load_fpr_QT1(QFPREG(rs2));
3277 gen_op_fcmpeq(rd & 3);
3278 break;
3279 default:
3280 goto illegal_insn;
3282 } else if (xop == 0x2) {
3283 TCGv dst = gen_dest_gpr(dc, rd);
3284 rs1 = GET_FIELD(insn, 13, 17);
3285 if (rs1 == 0) {
3286 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3287 if (IS_IMM) { /* immediate */
3288 simm = GET_FIELDs(insn, 19, 31);
3289 tcg_gen_movi_tl(dst, simm);
3290 gen_store_gpr(dc, rd, dst);
3291 } else { /* register */
3292 rs2 = GET_FIELD(insn, 27, 31);
3293 if (rs2 == 0) {
3294 tcg_gen_movi_tl(dst, 0);
3295 gen_store_gpr(dc, rd, dst);
3296 } else {
3297 cpu_src2 = gen_load_gpr(dc, rs2);
3298 gen_store_gpr(dc, rd, cpu_src2);
3301 } else {
3302 cpu_src1 = get_src1(dc, insn);
3303 if (IS_IMM) { /* immediate */
3304 simm = GET_FIELDs(insn, 19, 31);
3305 tcg_gen_ori_tl(dst, cpu_src1, simm);
3306 gen_store_gpr(dc, rd, dst);
3307 } else { /* register */
3308 rs2 = GET_FIELD(insn, 27, 31);
3309 if (rs2 == 0) {
3310 /* mov shortcut: or x, %g0, y -> mov x, y */
3311 gen_store_gpr(dc, rd, cpu_src1);
3312 } else {
3313 cpu_src2 = gen_load_gpr(dc, rs2);
3314 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3315 gen_store_gpr(dc, rd, dst);
3319 #ifdef TARGET_SPARC64
3320 } else if (xop == 0x25) { /* sll, V9 sllx */
3321 cpu_src1 = get_src1(dc, insn);
3322 if (IS_IMM) { /* immediate */
3323 simm = GET_FIELDs(insn, 20, 31);
3324 if (insn & (1 << 12)) {
3325 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3326 } else {
3327 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3329 } else { /* register */
3330 rs2 = GET_FIELD(insn, 27, 31);
3331 cpu_src2 = gen_load_gpr(dc, rs2);
3332 cpu_tmp0 = get_temp_tl(dc);
3333 if (insn & (1 << 12)) {
3334 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3335 } else {
3336 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3338 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3340 gen_store_gpr(dc, rd, cpu_dst);
3341 } else if (xop == 0x26) { /* srl, V9 srlx */
3342 cpu_src1 = get_src1(dc, insn);
3343 if (IS_IMM) { /* immediate */
3344 simm = GET_FIELDs(insn, 20, 31);
3345 if (insn & (1 << 12)) {
3346 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3347 } else {
3348 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3349 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3351 } else { /* register */
3352 rs2 = GET_FIELD(insn, 27, 31);
3353 cpu_src2 = gen_load_gpr(dc, rs2);
3354 cpu_tmp0 = get_temp_tl(dc);
3355 if (insn & (1 << 12)) {
3356 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3357 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3358 } else {
3359 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3360 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3361 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3364 gen_store_gpr(dc, rd, cpu_dst);
3365 } else if (xop == 0x27) { /* sra, V9 srax */
3366 cpu_src1 = get_src1(dc, insn);
3367 if (IS_IMM) { /* immediate */
3368 simm = GET_FIELDs(insn, 20, 31);
3369 if (insn & (1 << 12)) {
3370 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3371 } else {
3372 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3373 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3375 } else { /* register */
3376 rs2 = GET_FIELD(insn, 27, 31);
3377 cpu_src2 = gen_load_gpr(dc, rs2);
3378 cpu_tmp0 = get_temp_tl(dc);
3379 if (insn & (1 << 12)) {
3380 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3381 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3382 } else {
3383 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3384 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3385 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3388 gen_store_gpr(dc, rd, cpu_dst);
3389 #endif
3390 } else if (xop < 0x36) {
3391 if (xop < 0x20) {
3392 cpu_src1 = get_src1(dc, insn);
3393 cpu_src2 = get_src2(dc, insn);
3394 switch (xop & ~0x10) {
3395 case 0x0: /* add */
3396 if (xop & 0x10) {
3397 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3398 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3399 dc->cc_op = CC_OP_ADD;
3400 } else {
3401 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3403 break;
3404 case 0x1: /* and */
3405 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3406 if (xop & 0x10) {
3407 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3408 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3409 dc->cc_op = CC_OP_LOGIC;
3411 break;
3412 case 0x2: /* or */
3413 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3414 if (xop & 0x10) {
3415 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3416 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3417 dc->cc_op = CC_OP_LOGIC;
3419 break;
3420 case 0x3: /* xor */
3421 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3422 if (xop & 0x10) {
3423 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3424 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3425 dc->cc_op = CC_OP_LOGIC;
3427 break;
3428 case 0x4: /* sub */
3429 if (xop & 0x10) {
3430 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3431 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3432 dc->cc_op = CC_OP_SUB;
3433 } else {
3434 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3436 break;
3437 case 0x5: /* andn */
3438 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3439 if (xop & 0x10) {
3440 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3441 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3442 dc->cc_op = CC_OP_LOGIC;
3444 break;
3445 case 0x6: /* orn */
3446 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3447 if (xop & 0x10) {
3448 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3449 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3450 dc->cc_op = CC_OP_LOGIC;
3452 break;
3453 case 0x7: /* xorn */
3454 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3455 if (xop & 0x10) {
3456 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3457 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3458 dc->cc_op = CC_OP_LOGIC;
3460 break;
3461 case 0x8: /* addx, V9 addc */
3462 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3463 (xop & 0x10));
3464 break;
3465 #ifdef TARGET_SPARC64
3466 case 0x9: /* V9 mulx */
3467 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3468 break;
3469 #endif
3470 case 0xa: /* umul */
3471 CHECK_IU_FEATURE(dc, MUL);
3472 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3473 if (xop & 0x10) {
3474 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3475 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3476 dc->cc_op = CC_OP_LOGIC;
3478 break;
3479 case 0xb: /* smul */
3480 CHECK_IU_FEATURE(dc, MUL);
3481 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3482 if (xop & 0x10) {
3483 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3484 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3485 dc->cc_op = CC_OP_LOGIC;
3487 break;
3488 case 0xc: /* subx, V9 subc */
3489 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3490 (xop & 0x10));
3491 break;
3492 #ifdef TARGET_SPARC64
3493 case 0xd: /* V9 udivx */
3494 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3495 break;
3496 #endif
3497 case 0xe: /* udiv */
3498 CHECK_IU_FEATURE(dc, DIV);
3499 if (xop & 0x10) {
3500 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3501 cpu_src2);
3502 dc->cc_op = CC_OP_DIV;
3503 } else {
3504 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3505 cpu_src2);
3507 break;
3508 case 0xf: /* sdiv */
3509 CHECK_IU_FEATURE(dc, DIV);
3510 if (xop & 0x10) {
3511 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3512 cpu_src2);
3513 dc->cc_op = CC_OP_DIV;
3514 } else {
3515 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3516 cpu_src2);
3518 break;
3519 default:
3520 goto illegal_insn;
3522 gen_store_gpr(dc, rd, cpu_dst);
3523 } else {
3524 cpu_src1 = get_src1(dc, insn);
3525 cpu_src2 = get_src2(dc, insn);
3526 switch (xop) {
3527 case 0x20: /* taddcc */
3528 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3529 gen_store_gpr(dc, rd, cpu_dst);
3530 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3531 dc->cc_op = CC_OP_TADD;
3532 break;
3533 case 0x21: /* tsubcc */
3534 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3535 gen_store_gpr(dc, rd, cpu_dst);
3536 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3537 dc->cc_op = CC_OP_TSUB;
3538 break;
3539 case 0x22: /* taddcctv */
3540 gen_helper_taddcctv(cpu_dst, cpu_env,
3541 cpu_src1, cpu_src2);
3542 gen_store_gpr(dc, rd, cpu_dst);
3543 dc->cc_op = CC_OP_TADDTV;
3544 break;
3545 case 0x23: /* tsubcctv */
3546 gen_helper_tsubcctv(cpu_dst, cpu_env,
3547 cpu_src1, cpu_src2);
3548 gen_store_gpr(dc, rd, cpu_dst);
3549 dc->cc_op = CC_OP_TSUBTV;
3550 break;
3551 case 0x24: /* mulscc */
3552 update_psr(dc);
3553 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3554 gen_store_gpr(dc, rd, cpu_dst);
3555 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3556 dc->cc_op = CC_OP_ADD;
3557 break;
3558 #ifndef TARGET_SPARC64
3559 case 0x25: /* sll */
3560 if (IS_IMM) { /* immediate */
3561 simm = GET_FIELDs(insn, 20, 31);
3562 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3563 } else { /* register */
3564 cpu_tmp0 = get_temp_tl(dc);
3565 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3566 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3568 gen_store_gpr(dc, rd, cpu_dst);
3569 break;
3570 case 0x26: /* srl */
3571 if (IS_IMM) { /* immediate */
3572 simm = GET_FIELDs(insn, 20, 31);
3573 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3574 } else { /* register */
3575 cpu_tmp0 = get_temp_tl(dc);
3576 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3577 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3579 gen_store_gpr(dc, rd, cpu_dst);
3580 break;
3581 case 0x27: /* sra */
3582 if (IS_IMM) { /* immediate */
3583 simm = GET_FIELDs(insn, 20, 31);
3584 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3585 } else { /* register */
3586 cpu_tmp0 = get_temp_tl(dc);
3587 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3588 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3590 gen_store_gpr(dc, rd, cpu_dst);
3591 break;
3592 #endif
3593 case 0x30:
3595 cpu_tmp0 = get_temp_tl(dc);
3596 switch(rd) {
3597 case 0: /* wry */
3598 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3599 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3600 break;
3601 #ifndef TARGET_SPARC64
3602 case 0x01 ... 0x0f: /* undefined in the
3603 SPARCv8 manual, nop
3604 on the microSPARC
3605 II */
3606 case 0x10 ... 0x1f: /* implementation-dependent
3607 in the SPARCv8
3608 manual, nop on the
3609 microSPARC II */
3610 if ((rd == 0x13) && (dc->def->features &
3611 CPU_FEATURE_POWERDOWN)) {
3612 /* LEON3 power-down */
3613 save_state(dc);
3614 gen_helper_power_down(cpu_env);
3616 break;
3617 #else
3618 case 0x2: /* V9 wrccr */
3619 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3620 gen_helper_wrccr(cpu_env, cpu_tmp0);
3621 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3622 dc->cc_op = CC_OP_FLAGS;
3623 break;
3624 case 0x3: /* V9 wrasi */
3625 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3626 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3627 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3628 break;
3629 case 0x6: /* V9 wrfprs */
3630 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3631 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3632 save_state(dc);
3633 gen_op_next_insn();
3634 tcg_gen_exit_tb(0);
3635 dc->is_br = 1;
3636 break;
3637 case 0xf: /* V9 sir, nop if user */
3638 #if !defined(CONFIG_USER_ONLY)
3639 if (supervisor(dc)) {
3640 ; // XXX
3642 #endif
3643 break;
3644 case 0x13: /* Graphics Status */
3645 if (gen_trap_ifnofpu(dc)) {
3646 goto jmp_insn;
3648 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3649 break;
3650 case 0x14: /* Softint set */
3651 if (!supervisor(dc))
3652 goto illegal_insn;
3653 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3654 gen_helper_set_softint(cpu_env, cpu_tmp0);
3655 break;
3656 case 0x15: /* Softint clear */
3657 if (!supervisor(dc))
3658 goto illegal_insn;
3659 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3660 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3661 break;
3662 case 0x16: /* Softint write */
3663 if (!supervisor(dc))
3664 goto illegal_insn;
3665 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3666 gen_helper_write_softint(cpu_env, cpu_tmp0);
3667 break;
3668 case 0x17: /* Tick compare */
3669 #if !defined(CONFIG_USER_ONLY)
3670 if (!supervisor(dc))
3671 goto illegal_insn;
3672 #endif
3674 TCGv_ptr r_tickptr;
3676 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3677 cpu_src2);
3678 r_tickptr = tcg_temp_new_ptr();
3679 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3680 offsetof(CPUSPARCState, tick));
3681 gen_helper_tick_set_limit(r_tickptr,
3682 cpu_tick_cmpr);
3683 tcg_temp_free_ptr(r_tickptr);
3685 break;
3686 case 0x18: /* System tick */
3687 #if !defined(CONFIG_USER_ONLY)
3688 if (!supervisor(dc))
3689 goto illegal_insn;
3690 #endif
3692 TCGv_ptr r_tickptr;
3694 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3695 cpu_src2);
3696 r_tickptr = tcg_temp_new_ptr();
3697 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3698 offsetof(CPUSPARCState, stick));
3699 gen_helper_tick_set_count(r_tickptr,
3700 cpu_tmp0);
3701 tcg_temp_free_ptr(r_tickptr);
3703 break;
3704 case 0x19: /* System tick compare */
3705 #if !defined(CONFIG_USER_ONLY)
3706 if (!supervisor(dc))
3707 goto illegal_insn;
3708 #endif
3710 TCGv_ptr r_tickptr;
3712 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3713 cpu_src2);
3714 r_tickptr = tcg_temp_new_ptr();
3715 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3716 offsetof(CPUSPARCState, stick));
3717 gen_helper_tick_set_limit(r_tickptr,
3718 cpu_stick_cmpr);
3719 tcg_temp_free_ptr(r_tickptr);
3721 break;
3723 case 0x10: /* Performance Control */
3724 case 0x11: /* Performance Instrumentation
3725 Counter */
3726 case 0x12: /* Dispatch Control */
3727 #endif
3728 default:
3729 goto illegal_insn;
3732 break;
3733 #if !defined(CONFIG_USER_ONLY)
3734 case 0x31: /* wrpsr, V9 saved, restored */
3736 if (!supervisor(dc))
3737 goto priv_insn;
3738 #ifdef TARGET_SPARC64
3739 switch (rd) {
3740 case 0:
3741 gen_helper_saved(cpu_env);
3742 break;
3743 case 1:
3744 gen_helper_restored(cpu_env);
3745 break;
3746 case 2: /* UA2005 allclean */
3747 case 3: /* UA2005 otherw */
3748 case 4: /* UA2005 normalw */
3749 case 5: /* UA2005 invalw */
3750 // XXX
3751 default:
3752 goto illegal_insn;
3754 #else
3755 cpu_tmp0 = get_temp_tl(dc);
3756 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3757 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3758 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3759 dc->cc_op = CC_OP_FLAGS;
3760 save_state(dc);
3761 gen_op_next_insn();
3762 tcg_gen_exit_tb(0);
3763 dc->is_br = 1;
3764 #endif
3766 break;
3767 case 0x32: /* wrwim, V9 wrpr */
3769 if (!supervisor(dc))
3770 goto priv_insn;
3771 cpu_tmp0 = get_temp_tl(dc);
3772 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3773 #ifdef TARGET_SPARC64
3774 switch (rd) {
3775 case 0: // tpc
3777 TCGv_ptr r_tsptr;
3779 r_tsptr = tcg_temp_new_ptr();
3780 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3781 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3782 offsetof(trap_state, tpc));
3783 tcg_temp_free_ptr(r_tsptr);
3785 break;
3786 case 1: // tnpc
3788 TCGv_ptr r_tsptr;
3790 r_tsptr = tcg_temp_new_ptr();
3791 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3792 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3793 offsetof(trap_state, tnpc));
3794 tcg_temp_free_ptr(r_tsptr);
3796 break;
3797 case 2: // tstate
3799 TCGv_ptr r_tsptr;
3801 r_tsptr = tcg_temp_new_ptr();
3802 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3803 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3804 offsetof(trap_state,
3805 tstate));
3806 tcg_temp_free_ptr(r_tsptr);
3808 break;
3809 case 3: // tt
3811 TCGv_ptr r_tsptr;
3813 r_tsptr = tcg_temp_new_ptr();
3814 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3815 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3816 offsetof(trap_state, tt));
3817 tcg_temp_free_ptr(r_tsptr);
3819 break;
3820 case 4: // tick
3822 TCGv_ptr r_tickptr;
3824 r_tickptr = tcg_temp_new_ptr();
3825 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3826 offsetof(CPUSPARCState, tick));
3827 gen_helper_tick_set_count(r_tickptr,
3828 cpu_tmp0);
3829 tcg_temp_free_ptr(r_tickptr);
3831 break;
3832 case 5: // tba
3833 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3834 break;
3835 case 6: // pstate
3836 save_state(dc);
3837 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3838 dc->npc = DYNAMIC_PC;
3839 break;
3840 case 7: // tl
3841 save_state(dc);
3842 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3843 offsetof(CPUSPARCState, tl));
3844 dc->npc = DYNAMIC_PC;
3845 break;
3846 case 8: // pil
3847 gen_helper_wrpil(cpu_env, cpu_tmp0);
3848 break;
3849 case 9: // cwp
3850 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3851 break;
3852 case 10: // cansave
3853 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3854 offsetof(CPUSPARCState,
3855 cansave));
3856 break;
3857 case 11: // canrestore
3858 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3859 offsetof(CPUSPARCState,
3860 canrestore));
3861 break;
3862 case 12: // cleanwin
3863 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3864 offsetof(CPUSPARCState,
3865 cleanwin));
3866 break;
3867 case 13: // otherwin
3868 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3869 offsetof(CPUSPARCState,
3870 otherwin));
3871 break;
3872 case 14: // wstate
3873 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3874 offsetof(CPUSPARCState,
3875 wstate));
3876 break;
3877 case 16: // UA2005 gl
3878 CHECK_IU_FEATURE(dc, GL);
3879 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3880 offsetof(CPUSPARCState, gl));
3881 break;
3882 case 26: // UA2005 strand status
3883 CHECK_IU_FEATURE(dc, HYPV);
3884 if (!hypervisor(dc))
3885 goto priv_insn;
3886 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3887 break;
3888 default:
3889 goto illegal_insn;
3891 #else
3892 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3893 if (dc->def->nwindows != 32) {
3894 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3895 (1 << dc->def->nwindows) - 1);
3897 #endif
3899 break;
3900 case 0x33: /* wrtbr, UA2005 wrhpr */
3902 #ifndef TARGET_SPARC64
3903 if (!supervisor(dc))
3904 goto priv_insn;
3905 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3906 #else
3907 CHECK_IU_FEATURE(dc, HYPV);
3908 if (!hypervisor(dc))
3909 goto priv_insn;
3910 cpu_tmp0 = get_temp_tl(dc);
3911 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3912 switch (rd) {
3913 case 0: // hpstate
3914 // XXX gen_op_wrhpstate();
3915 save_state(dc);
3916 gen_op_next_insn();
3917 tcg_gen_exit_tb(0);
3918 dc->is_br = 1;
3919 break;
3920 case 1: // htstate
3921 // XXX gen_op_wrhtstate();
3922 break;
3923 case 3: // hintp
3924 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3925 break;
3926 case 5: // htba
3927 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3928 break;
3929 case 31: // hstick_cmpr
3931 TCGv_ptr r_tickptr;
3933 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3934 r_tickptr = tcg_temp_new_ptr();
3935 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3936 offsetof(CPUSPARCState, hstick));
3937 gen_helper_tick_set_limit(r_tickptr,
3938 cpu_hstick_cmpr);
3939 tcg_temp_free_ptr(r_tickptr);
3941 break;
3942 case 6: // hver readonly
3943 default:
3944 goto illegal_insn;
3946 #endif
3948 break;
3949 #endif
3950 #ifdef TARGET_SPARC64
3951 case 0x2c: /* V9 movcc */
3953 int cc = GET_FIELD_SP(insn, 11, 12);
3954 int cond = GET_FIELD_SP(insn, 14, 17);
3955 DisasCompare cmp;
3956 TCGv dst;
3958 if (insn & (1 << 18)) {
3959 if (cc == 0) {
3960 gen_compare(&cmp, 0, cond, dc);
3961 } else if (cc == 2) {
3962 gen_compare(&cmp, 1, cond, dc);
3963 } else {
3964 goto illegal_insn;
3966 } else {
3967 gen_fcompare(&cmp, cc, cond);
3970 /* The get_src2 above loaded the normal 13-bit
3971 immediate field, not the 11-bit field we have
3972 in movcc. But it did handle the reg case. */
3973 if (IS_IMM) {
3974 simm = GET_FIELD_SPs(insn, 0, 10);
3975 tcg_gen_movi_tl(cpu_src2, simm);
3978 dst = gen_load_gpr(dc, rd);
3979 tcg_gen_movcond_tl(cmp.cond, dst,
3980 cmp.c1, cmp.c2,
3981 cpu_src2, dst);
3982 free_compare(&cmp);
3983 gen_store_gpr(dc, rd, dst);
3984 break;
3986 case 0x2d: /* V9 sdivx */
3987 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3988 gen_store_gpr(dc, rd, cpu_dst);
3989 break;
3990 case 0x2e: /* V9 popc */
3991 gen_helper_popc(cpu_dst, cpu_src2);
3992 gen_store_gpr(dc, rd, cpu_dst);
3993 break;
3994 case 0x2f: /* V9 movr */
3996 int cond = GET_FIELD_SP(insn, 10, 12);
3997 DisasCompare cmp;
3998 TCGv dst;
4000 gen_compare_reg(&cmp, cond, cpu_src1);
4002 /* The get_src2 above loaded the normal 13-bit
4003 immediate field, not the 10-bit field we have
4004 in movr. But it did handle the reg case. */
4005 if (IS_IMM) {
4006 simm = GET_FIELD_SPs(insn, 0, 9);
4007 tcg_gen_movi_tl(cpu_src2, simm);
4010 dst = gen_load_gpr(dc, rd);
4011 tcg_gen_movcond_tl(cmp.cond, dst,
4012 cmp.c1, cmp.c2,
4013 cpu_src2, dst);
4014 free_compare(&cmp);
4015 gen_store_gpr(dc, rd, dst);
4016 break;
4018 #endif
4019 default:
4020 goto illegal_insn;
4023 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4024 #ifdef TARGET_SPARC64
4025 int opf = GET_FIELD_SP(insn, 5, 13);
4026 rs1 = GET_FIELD(insn, 13, 17);
4027 rs2 = GET_FIELD(insn, 27, 31);
4028 if (gen_trap_ifnofpu(dc)) {
4029 goto jmp_insn;
4032 switch (opf) {
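/* The trailing gen_edge arguments below are (width, cc, left). */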
4033 case 0x000: /* VIS I edge8cc */
4034 CHECK_FPU_FEATURE(dc, VIS1);
4035 cpu_src1 = gen_load_gpr(dc, rs1);
4036 cpu_src2 = gen_load_gpr(dc, rs2);
4037 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4038 gen_store_gpr(dc, rd, cpu_dst);
4039 break;
4040 case 0x001: /* VIS II edge8n */
4041 CHECK_FPU_FEATURE(dc, VIS2);
4042 cpu_src1 = gen_load_gpr(dc, rs1);
4043 cpu_src2 = gen_load_gpr(dc, rs2);
4044 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4045 gen_store_gpr(dc, rd, cpu_dst);
4046 break;
4047 case 0x002: /* VIS I edge8lcc */
4048 CHECK_FPU_FEATURE(dc, VIS1);
4049 cpu_src1 = gen_load_gpr(dc, rs1);
4050 cpu_src2 = gen_load_gpr(dc, rs2);
4051 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4052 gen_store_gpr(dc, rd, cpu_dst);
4053 break;
4054 case 0x003: /* VIS II edge8ln */
4055 CHECK_FPU_FEATURE(dc, VIS2);
4056 cpu_src1 = gen_load_gpr(dc, rs1);
4057 cpu_src2 = gen_load_gpr(dc, rs2);
4058 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4059 gen_store_gpr(dc, rd, cpu_dst);
4060 break;
4061 case 0x004: /* VIS I edge16cc */
4062 CHECK_FPU_FEATURE(dc, VIS1);
4063 cpu_src1 = gen_load_gpr(dc, rs1);
4064 cpu_src2 = gen_load_gpr(dc, rs2);
4065 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4066 gen_store_gpr(dc, rd, cpu_dst);
4067 break;
4068 case 0x005: /* VIS II edge16n */
4069 CHECK_FPU_FEATURE(dc, VIS2);
4070 cpu_src1 = gen_load_gpr(dc, rs1);
4071 cpu_src2 = gen_load_gpr(dc, rs2);
4072 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4073 gen_store_gpr(dc, rd, cpu_dst);
4074 break;
4075 case 0x006: /* VIS I edge16lcc */
4076 CHECK_FPU_FEATURE(dc, VIS1);
4077 cpu_src1 = gen_load_gpr(dc, rs1);
4078 cpu_src2 = gen_load_gpr(dc, rs2);
4079 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4080 gen_store_gpr(dc, rd, cpu_dst);
4081 break;
4082 case 0x007: /* VIS II edge16ln */
4083 CHECK_FPU_FEATURE(dc, VIS2);
4084 cpu_src1 = gen_load_gpr(dc, rs1);
4085 cpu_src2 = gen_load_gpr(dc, rs2);
4086 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4087 gen_store_gpr(dc, rd, cpu_dst);
4088 break;
4089 case 0x008: /* VIS I edge32cc */
4090 CHECK_FPU_FEATURE(dc, VIS1);
4091 cpu_src1 = gen_load_gpr(dc, rs1);
4092 cpu_src2 = gen_load_gpr(dc, rs2);
4093 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4094 gen_store_gpr(dc, rd, cpu_dst);
4095 break;
4096 case 0x009: /* VIS II edge32n */
4097 CHECK_FPU_FEATURE(dc, VIS2);
4098 cpu_src1 = gen_load_gpr(dc, rs1);
4099 cpu_src2 = gen_load_gpr(dc, rs2);
4100 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4101 gen_store_gpr(dc, rd, cpu_dst);
4102 break;
4103 case 0x00a: /* VIS I edge32lcc */
4104 CHECK_FPU_FEATURE(dc, VIS1);
4105 cpu_src1 = gen_load_gpr(dc, rs1);
4106 cpu_src2 = gen_load_gpr(dc, rs2);
4107 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4108 gen_store_gpr(dc, rd, cpu_dst);
4109 break;
4110 case 0x00b: /* VIS II edge32ln */
4111 CHECK_FPU_FEATURE(dc, VIS2);
4112 cpu_src1 = gen_load_gpr(dc, rs1);
4113 cpu_src2 = gen_load_gpr(dc, rs2);
4114 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4115 gen_store_gpr(dc, rd, cpu_dst);
4116 break;
4117 case 0x010: /* VIS I array8 */
4118 CHECK_FPU_FEATURE(dc, VIS1);
4119 cpu_src1 = gen_load_gpr(dc, rs1);
4120 cpu_src2 = gen_load_gpr(dc, rs2);
4121 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4122 gen_store_gpr(dc, rd, cpu_dst);
4123 break;
4124 case 0x012: /* VIS I array16 */
4125 CHECK_FPU_FEATURE(dc, VIS1);
4126 cpu_src1 = gen_load_gpr(dc, rs1);
4127 cpu_src2 = gen_load_gpr(dc, rs2);
4128 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4129 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4130 gen_store_gpr(dc, rd, cpu_dst);
4131 break;
4132 case 0x014: /* VIS I array32 */
4133 CHECK_FPU_FEATURE(dc, VIS1);
4134 cpu_src1 = gen_load_gpr(dc, rs1);
4135 cpu_src2 = gen_load_gpr(dc, rs2);
4136 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4137 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4138 gen_store_gpr(dc, rd, cpu_dst);
4139 break;
4140 case 0x018: /* VIS I alignaddr */
4141 CHECK_FPU_FEATURE(dc, VIS1);
4142 cpu_src1 = gen_load_gpr(dc, rs1);
4143 cpu_src2 = gen_load_gpr(dc, rs2);
4144 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4145 gen_store_gpr(dc, rd, cpu_dst);
4146 break;
4147 case 0x01a: /* VIS I alignaddrl */
4148 CHECK_FPU_FEATURE(dc, VIS1);
4149 cpu_src1 = gen_load_gpr(dc, rs1);
4150 cpu_src2 = gen_load_gpr(dc, rs2);
4151 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4152 gen_store_gpr(dc, rd, cpu_dst);
4153 break;
4154 case 0x019: /* VIS II bmask */
4155 CHECK_FPU_FEATURE(dc, VIS2);
4156 cpu_src1 = gen_load_gpr(dc, rs1);
4157 cpu_src2 = gen_load_gpr(dc, rs2);
4158 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4159 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4160 gen_store_gpr(dc, rd, cpu_dst);
4161 break;
4162 case 0x020: /* VIS I fcmple16 */
4163 CHECK_FPU_FEATURE(dc, VIS1);
4164 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4165 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4166 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4167 gen_store_gpr(dc, rd, cpu_dst);
4168 break;
4169 case 0x022: /* VIS I fcmpne16 */
4170 CHECK_FPU_FEATURE(dc, VIS1);
4171 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4172 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4173 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4174 gen_store_gpr(dc, rd, cpu_dst);
4175 break;
4176 case 0x024: /* VIS I fcmple32 */
4177 CHECK_FPU_FEATURE(dc, VIS1);
4178 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4179 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4180 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4181 gen_store_gpr(dc, rd, cpu_dst);
4182 break;
4183 case 0x026: /* VIS I fcmpne32 */
4184 CHECK_FPU_FEATURE(dc, VIS1);
4185 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4186 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4187 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4188 gen_store_gpr(dc, rd, cpu_dst);
4189 break;
4190 case 0x028: /* VIS I fcmpgt16 */
4191 CHECK_FPU_FEATURE(dc, VIS1);
4192 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4193 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4194 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4195 gen_store_gpr(dc, rd, cpu_dst);
4196 break;
4197 case 0x02a: /* VIS I fcmpeq16 */
4198 CHECK_FPU_FEATURE(dc, VIS1);
4199 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4200 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4201 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4202 gen_store_gpr(dc, rd, cpu_dst);
4203 break;
4204 case 0x02c: /* VIS I fcmpgt32 */
4205 CHECK_FPU_FEATURE(dc, VIS1);
4206 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4207 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4208 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4209 gen_store_gpr(dc, rd, cpu_dst);
4210 break;
4211 case 0x02e: /* VIS I fcmpeq32 */
4212 CHECK_FPU_FEATURE(dc, VIS1);
4213 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4214 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4215 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4216 gen_store_gpr(dc, rd, cpu_dst);
4217 break;
4218 case 0x031: /* VIS I fmul8x16 */
4219 CHECK_FPU_FEATURE(dc, VIS1);
4220 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4221 break;
4222 case 0x033: /* VIS I fmul8x16au */
4223 CHECK_FPU_FEATURE(dc, VIS1);
4224 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4225 break;
4226 case 0x035: /* VIS I fmul8x16al */
4227 CHECK_FPU_FEATURE(dc, VIS1);
4228 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4229 break;
4230 case 0x036: /* VIS I fmul8sux16 */
4231 CHECK_FPU_FEATURE(dc, VIS1);
4232 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4233 break;
4234 case 0x037: /* VIS I fmul8ulx16 */
4235 CHECK_FPU_FEATURE(dc, VIS1);
4236 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4237 break;
4238 case 0x038: /* VIS I fmuld8sux16 */
4239 CHECK_FPU_FEATURE(dc, VIS1);
4240 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4241 break;
4242 case 0x039: /* VIS I fmuld8ulx16 */
4243 CHECK_FPU_FEATURE(dc, VIS1);
4244 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4245 break;
4246 case 0x03a: /* VIS I fpack32 */
4247 CHECK_FPU_FEATURE(dc, VIS1);
4248 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4249 break;
4250 case 0x03b: /* VIS I fpack16 */
4251 CHECK_FPU_FEATURE(dc, VIS1);
4252 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4253 cpu_dst_32 = gen_dest_fpr_F(dc);
4254 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4255 gen_store_fpr_F(dc, rd, cpu_dst_32);
4256 break;
4257 case 0x03d: /* VIS I fpackfix */
4258 CHECK_FPU_FEATURE(dc, VIS1);
4259 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4260 cpu_dst_32 = gen_dest_fpr_F(dc);
4261 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4262 gen_store_fpr_F(dc, rd, cpu_dst_32);
4263 break;
4264 case 0x03e: /* VIS I pdist */
4265 CHECK_FPU_FEATURE(dc, VIS1);
4266 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4267 break;
4268 case 0x048: /* VIS I faligndata */
4269 CHECK_FPU_FEATURE(dc, VIS1);
4270 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4271 break;
4272 case 0x04b: /* VIS I fpmerge */
4273 CHECK_FPU_FEATURE(dc, VIS1);
4274 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4275 break;
4276 case 0x04c: /* VIS II bshuffle */
4277 CHECK_FPU_FEATURE(dc, VIS2);
4278 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4279 break;
4280 case 0x04d: /* VIS I fexpand */
4281 CHECK_FPU_FEATURE(dc, VIS1);
4282 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4283 break;
4284 case 0x050: /* VIS I fpadd16 */
4285 CHECK_FPU_FEATURE(dc, VIS1);
4286 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4287 break;
4288 case 0x051: /* VIS I fpadd16s */
4289 CHECK_FPU_FEATURE(dc, VIS1);
4290 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4291 break;
4292 case 0x052: /* VIS I fpadd32 */
4293 CHECK_FPU_FEATURE(dc, VIS1);
4294 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4295 break;
4296 case 0x053: /* VIS I fpadd32s */
4297 CHECK_FPU_FEATURE(dc, VIS1);
4298 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4299 break;
4300 case 0x054: /* VIS I fpsub16 */
4301 CHECK_FPU_FEATURE(dc, VIS1);
4302 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4303 break;
4304 case 0x055: /* VIS I fpsub16s */
4305 CHECK_FPU_FEATURE(dc, VIS1);
4306 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4307 break;
4308 case 0x056: /* VIS I fpsub32 */
4309 CHECK_FPU_FEATURE(dc, VIS1);
4310 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4311 break;
4312 case 0x057: /* VIS I fpsub32s */
4313 CHECK_FPU_FEATURE(dc, VIS1);
4314 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4315 break;
4316 case 0x060: /* VIS I fzero */
4317 CHECK_FPU_FEATURE(dc, VIS1);
4318 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4319 tcg_gen_movi_i64(cpu_dst_64, 0);
4320 gen_store_fpr_D(dc, rd, cpu_dst_64);
4321 break;
4322 case 0x061: /* VIS I fzeros */
4323 CHECK_FPU_FEATURE(dc, VIS1);
4324 cpu_dst_32 = gen_dest_fpr_F(dc);
4325 tcg_gen_movi_i32(cpu_dst_32, 0);
4326 gen_store_fpr_F(dc, rd, cpu_dst_32);
4327 break;
4328 case 0x062: /* VIS I fnor */
4329 CHECK_FPU_FEATURE(dc, VIS1);
4330 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4331 break;
4332 case 0x063: /* VIS I fnors */
4333 CHECK_FPU_FEATURE(dc, VIS1);
4334 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4335 break;
4336 case 0x064: /* VIS I fandnot2 */
4337 CHECK_FPU_FEATURE(dc, VIS1);
4338 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4339 break;
4340 case 0x065: /* VIS I fandnot2s */
4341 CHECK_FPU_FEATURE(dc, VIS1);
4342 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4343 break;
4344 case 0x066: /* VIS I fnot2 */
4345 CHECK_FPU_FEATURE(dc, VIS1);
4346 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4347 break;
4348 case 0x067: /* VIS I fnot2s */
4349 CHECK_FPU_FEATURE(dc, VIS1);
4350 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4351 break;
4352 case 0x068: /* VIS I fandnot1 */
4353 CHECK_FPU_FEATURE(dc, VIS1);
4354 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4355 break;
4356 case 0x069: /* VIS I fandnot1s */
4357 CHECK_FPU_FEATURE(dc, VIS1);
4358 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4359 break;
4360 case 0x06a: /* VIS I fnot1 */
4361 CHECK_FPU_FEATURE(dc, VIS1);
4362 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4363 break;
4364 case 0x06b: /* VIS I fnot1s */
4365 CHECK_FPU_FEATURE(dc, VIS1);
4366 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4367 break;
4368 case 0x06c: /* VIS I fxor */
4369 CHECK_FPU_FEATURE(dc, VIS1);
4370 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4371 break;
4372 case 0x06d: /* VIS I fxors */
4373 CHECK_FPU_FEATURE(dc, VIS1);
4374 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4375 break;
4376 case 0x06e: /* VIS I fnand */
4377 CHECK_FPU_FEATURE(dc, VIS1);
4378 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4379 break;
4380 case 0x06f: /* VIS I fnands */
4381 CHECK_FPU_FEATURE(dc, VIS1);
4382 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4383 break;
4384 case 0x070: /* VIS I fand */
4385 CHECK_FPU_FEATURE(dc, VIS1);
4386 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4387 break;
4388 case 0x071: /* VIS I fands */
4389 CHECK_FPU_FEATURE(dc, VIS1);
4390 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4391 break;
4392 case 0x072: /* VIS I fxnor */
4393 CHECK_FPU_FEATURE(dc, VIS1);
4394 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4395 break;
4396 case 0x073: /* VIS I fxnors */
4397 CHECK_FPU_FEATURE(dc, VIS1);
4398 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4399 break;
4400 case 0x074: /* VIS I fsrc1 */
4401 CHECK_FPU_FEATURE(dc, VIS1);
4402 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4403 gen_store_fpr_D(dc, rd, cpu_src1_64);
4404 break;
4405 case 0x075: /* VIS I fsrc1s */
4406 CHECK_FPU_FEATURE(dc, VIS1);
4407 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4408 gen_store_fpr_F(dc, rd, cpu_src1_32);
4409 break;
4410 case 0x076: /* VIS I fornot2 */
4411 CHECK_FPU_FEATURE(dc, VIS1);
4412 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4413 break;
4414 case 0x077: /* VIS I fornot2s */
4415 CHECK_FPU_FEATURE(dc, VIS1);
4416 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4417 break;
4418 case 0x078: /* VIS I fsrc2 */
4419 CHECK_FPU_FEATURE(dc, VIS1);
4420 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4421 gen_store_fpr_D(dc, rd, cpu_src1_64);
4422 break;
4423 case 0x079: /* VIS I fsrc2s */
4424 CHECK_FPU_FEATURE(dc, VIS1);
4425 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4426 gen_store_fpr_F(dc, rd, cpu_src1_32);
4427 break;
4428 case 0x07a: /* VIS I fornot1 */
4429 CHECK_FPU_FEATURE(dc, VIS1);
4430 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4431 break;
4432 case 0x07b: /* VIS I fornot1s */
4433 CHECK_FPU_FEATURE(dc, VIS1);
4434 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4435 break;
4436 case 0x07c: /* VIS I for */
4437 CHECK_FPU_FEATURE(dc, VIS1);
4438 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4439 break;
4440 case 0x07d: /* VIS I fors */
4441 CHECK_FPU_FEATURE(dc, VIS1);
4442 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4443 break;
4444 case 0x07e: /* VIS I fone */
4445 CHECK_FPU_FEATURE(dc, VIS1);
4446 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4447 tcg_gen_movi_i64(cpu_dst_64, -1);
4448 gen_store_fpr_D(dc, rd, cpu_dst_64);
4449 break;
4450 case 0x07f: /* VIS I fones */
4451 CHECK_FPU_FEATURE(dc, VIS1);
4452 cpu_dst_32 = gen_dest_fpr_F(dc);
4453 tcg_gen_movi_i32(cpu_dst_32, -1);
4454 gen_store_fpr_F(dc, rd, cpu_dst_32);
4455 break;
4456 case 0x080: /* VIS I shutdown */
4457 case 0x081: /* VIS II siam */
4458 // XXX: not implemented, treated as illegal
4459 goto illegal_insn;
4460 default:
4461 goto illegal_insn;
4462 }
4463 #else
4464 goto ncp_insn;
4465 #endif
4466 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4467 #ifdef TARGET_SPARC64
4468 goto illegal_insn;
4469 #else
4470 goto ncp_insn;
4471 #endif
4472 #ifdef TARGET_SPARC64
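/* V9 return computes its target like jmpl, pops the register window
   with helper_restore, and then branches; only npc receives the
   target, so the TB must end with npc marked DYNAMIC_PC. */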
4473 } else if (xop == 0x39) { /* V9 return */
4474 TCGv_i32 r_const;
4476 save_state(dc);
4477 cpu_src1 = get_src1(dc, insn);
4478 cpu_tmp0 = get_temp_tl(dc);
4479 if (IS_IMM) { /* immediate */
4480 simm = GET_FIELDs(insn, 19, 31);
4481 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4482 } else { /* register */
4483 rs2 = GET_FIELD(insn, 27, 31);
4484 if (rs2) {
4485 cpu_src2 = gen_load_gpr(dc, rs2);
4486 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4487 } else {
4488 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4489 }
4490 }
4491 gen_helper_restore(cpu_env);
4492 gen_mov_pc_npc(dc);
4493 r_const = tcg_const_i32(3);
4494 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4495 tcg_temp_free_i32(r_const);
4496 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4497 dc->npc = DYNAMIC_PC;
4498 goto jmp_insn;
4499 #endif
4500 } else {
4501 cpu_src1 = get_src1(dc, insn);
4502 cpu_tmp0 = get_temp_tl(dc);
4503 if (IS_IMM) { /* immediate */
4504 simm = GET_FIELDs(insn, 19, 31);
4505 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4506 } else { /* register */
4507 rs2 = GET_FIELD(insn, 27, 31);
4508 if (rs2) {
4509 cpu_src2 = gen_load_gpr(dc, rs2);
4510 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4511 } else {
4512 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4513 }
4514 }
4515 switch (xop) {
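/* jmpl stores the address of the jmpl itself into rd before
   transferring control; by convention "jmpl target, %o7" is a call
   and "jmpl %i7+8, %g0" a return. */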
4516 case 0x38: /* jmpl */
4517 {
4518 TCGv t;
4519 TCGv_i32 r_const;
4521 t = gen_dest_gpr(dc, rd);
4522 tcg_gen_movi_tl(t, dc->pc);
4523 gen_store_gpr(dc, rd, t);
4524 gen_mov_pc_npc(dc);
4525 r_const = tcg_const_i32(3);
4526 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4527 tcg_temp_free_i32(r_const);
4528 gen_address_mask(dc, cpu_tmp0);
4529 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4530 dc->npc = DYNAMIC_PC;
4531 }
4532 goto jmp_insn;
4533 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4534 case 0x39: /* rett, V9 return */
4535 {
4536 TCGv_i32 r_const;
4538 if (!supervisor(dc))
4539 goto priv_insn;
4540 gen_mov_pc_npc(dc);
4541 r_const = tcg_const_i32(3);
4542 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4543 tcg_temp_free_i32(r_const);
4544 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4545 dc->npc = DYNAMIC_PC;
4546 gen_helper_rett(cpu_env);
4547 }
4548 goto jmp_insn;
4549 #endif
4550 case 0x3b: /* flush */
4551 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4552 goto unimp_flush;
4553 /* nop */
4554 break;
4555 case 0x3c: /* save */
4556 save_state(dc);
4557 gen_helper_save(cpu_env);
4558 gen_store_gpr(dc, rd, cpu_tmp0);
4559 break;
4560 case 0x3d: /* restore */
4561 save_state(dc);
4562 gen_helper_restore(cpu_env);
4563 gen_store_gpr(dc, rd, cpu_tmp0);
4564 break;
4565 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4566 case 0x3e: /* V9 done/retry */
4567 {
4568 switch (rd) {
4569 case 0:
4570 if (!supervisor(dc))
4571 goto priv_insn;
4572 dc->npc = DYNAMIC_PC;
4573 dc->pc = DYNAMIC_PC;
4574 gen_helper_done(cpu_env);
4575 goto jmp_insn;
4576 case 1:
4577 if (!supervisor(dc))
4578 goto priv_insn;
4579 dc->npc = DYNAMIC_PC;
4580 dc->pc = DYNAMIC_PC;
4581 gen_helper_retry(cpu_env);
4582 goto jmp_insn;
4583 default:
4584 goto illegal_insn;
4585 }
4586 }
4587 break;
4588 #endif
4589 default:
4590 goto illegal_insn;
4591 }
4592 }
4593 break;
4594 }
4595 break;
4596 case 3: /* load/store instructions */
4597 {
4598 unsigned int xop = GET_FIELD(insn, 7, 12);
4599 /* ??? gen_address_mask prevents us from using a source
4600 register directly. Always generate a temporary. */
4601 TCGv cpu_addr = get_temp_tl(dc);
4603 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4604 if (xop == 0x3c || xop == 0x3e) {
4605 /* V9 casa/casxa : no offset */
4606 } else if (IS_IMM) { /* immediate */
4607 simm = GET_FIELDs(insn, 19, 31);
4608 if (simm != 0) {
4609 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4610 }
4611 } else { /* register */
4612 rs2 = GET_FIELD(insn, 27, 31);
4613 if (rs2 != 0) {
4614 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4615 }
4616 }
4617 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4618 (xop > 0x17 && xop <= 0x1d) ||
4619 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4620 TCGv cpu_val = gen_dest_gpr(dc, rd);
4622 switch (xop) {
4623 case 0x0: /* ld, V9 lduw, load unsigned word */
4624 gen_address_mask(dc, cpu_addr);
4625 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4626 break;
4627 case 0x1: /* ldub, load unsigned byte */
4628 gen_address_mask(dc, cpu_addr);
4629 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4630 break;
4631 case 0x2: /* lduh, load unsigned halfword */
4632 gen_address_mask(dc, cpu_addr);
4633 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4634 break;
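/* ldd fills a register pair: rd (which must be even, see below)
   receives the most-significant word of the 64-bit datum and rd+1
   the least-significant one. */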
4635 case 0x3: /* ldd, load double word */
4636 if (rd & 1)
4637 goto illegal_insn;
4638 else {
4639 TCGv_i32 r_const;
4640 TCGv_i64 t64;
4642 save_state(dc);
4643 r_const = tcg_const_i32(7);
4644 /* XXX remove alignment check */
4645 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4646 tcg_temp_free_i32(r_const);
4647 gen_address_mask(dc, cpu_addr);
4648 t64 = tcg_temp_new_i64();
4649 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4650 tcg_gen_trunc_i64_tl(cpu_val, t64);
4651 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4652 gen_store_gpr(dc, rd + 1, cpu_val);
4653 tcg_gen_shri_i64(t64, t64, 32);
4654 tcg_gen_trunc_i64_tl(cpu_val, t64);
4655 tcg_temp_free_i64(t64);
4656 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4657 }
4658 break;
4659 case 0x9: /* ldsb, load signed byte */
4660 gen_address_mask(dc, cpu_addr);
4661 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4662 break;
4663 case 0xa: /* ldsh, load signed halfword */
4664 gen_address_mask(dc, cpu_addr);
4665 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4666 break;
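/* ldstub is the classic SPARC lock primitive: the old byte comes
   back in rd and 0xff is left in memory.  Here it is emulated as a
   plain load/store pair, hence the atomicity XXX below. */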
4667 case 0xd: /* ldstub -- XXX: should be atomic */
4668 {
4669 TCGv r_const;
4671 gen_address_mask(dc, cpu_addr);
4672 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4673 r_const = tcg_const_tl(0xff);
4674 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4675 tcg_temp_free(r_const);
4676 }
4677 break;
4678 case 0x0f:
4679 /* swap, swap register with memory; should also be atomic */
4680 {
4681 TCGv t0 = get_temp_tl(dc);
4682 CHECK_IU_FEATURE(dc, SWAP);
4683 cpu_src1 = gen_load_gpr(dc, rd);
4684 gen_address_mask(dc, cpu_addr);
4685 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4686 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4687 tcg_gen_mov_tl(cpu_val, t0);
4688 }
4689 break;
4690 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
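/* Alternate-space loads: pre-V9 the i==1 encoding does not exist
   (V9 uses it to select the %asi register) and the ASI itself is
   privileged, hence the IS_IMM and supervisor() checks on 32-bit
   targets only. */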
4691 case 0x10: /* lda, V9 lduwa, load word alternate */
4692 #ifndef TARGET_SPARC64
4693 if (IS_IMM)
4694 goto illegal_insn;
4695 if (!supervisor(dc))
4696 goto priv_insn;
4697 #endif
4698 save_state(dc);
4699 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4700 break;
4701 case 0x11: /* lduba, load unsigned byte alternate */
4702 #ifndef TARGET_SPARC64
4703 if (IS_IMM)
4704 goto illegal_insn;
4705 if (!supervisor(dc))
4706 goto priv_insn;
4707 #endif
4708 save_state(dc);
4709 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4710 break;
4711 case 0x12: /* lduha, load unsigned halfword alternate */
4712 #ifndef TARGET_SPARC64
4713 if (IS_IMM)
4714 goto illegal_insn;
4715 if (!supervisor(dc))
4716 goto priv_insn;
4717 #endif
4718 save_state(dc);
4719 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4720 break;
4721 case 0x13: /* ldda, load double word alternate */
4722 #ifndef TARGET_SPARC64
4723 if (IS_IMM)
4724 goto illegal_insn;
4725 if (!supervisor(dc))
4726 goto priv_insn;
4727 #endif
4728 if (rd & 1)
4729 goto illegal_insn;
4730 save_state(dc);
4731 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4732 goto skip_move;
4733 case 0x19: /* ldsba, load signed byte alternate */
4734 #ifndef TARGET_SPARC64
4735 if (IS_IMM)
4736 goto illegal_insn;
4737 if (!supervisor(dc))
4738 goto priv_insn;
4739 #endif
4740 save_state(dc);
4741 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4742 break;
4743 case 0x1a: /* ldsha, load signed halfword alternate */
4744 #ifndef TARGET_SPARC64
4745 if (IS_IMM)
4746 goto illegal_insn;
4747 if (!supervisor(dc))
4748 goto priv_insn;
4749 #endif
4750 save_state(dc);
4751 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4752 break;
4753 case 0x1d: /* ldstuba -- XXX: should be atomic */
4754 #ifndef TARGET_SPARC64
4755 if (IS_IMM)
4756 goto illegal_insn;
4757 if (!supervisor(dc))
4758 goto priv_insn;
4759 #endif
4760 save_state(dc);
4761 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4762 break;
4763 case 0x1f: /* swapa, swap reg with alt. memory; should also be atomic */
4765 CHECK_IU_FEATURE(dc, SWAP);
4766 #ifndef TARGET_SPARC64
4767 if (IS_IMM)
4768 goto illegal_insn;
4769 if (!supervisor(dc))
4770 goto priv_insn;
4771 #endif
4772 save_state(dc);
4773 cpu_src1 = gen_load_gpr(dc, rd);
4774 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4775 break;
4777 #ifndef TARGET_SPARC64
4778 case 0x30: /* ldc */
4779 case 0x31: /* ldcsr */
4780 case 0x33: /* lddc */
4781 goto ncp_insn;
4782 #endif
4783 #endif
4784 #ifdef TARGET_SPARC64
4785 case 0x08: /* V9 ldsw */
4786 gen_address_mask(dc, cpu_addr);
4787 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4788 break;
4789 case 0x0b: /* V9 ldx */
4790 gen_address_mask(dc, cpu_addr);
4791 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4792 break;
4793 case 0x18: /* V9 ldswa */
4794 save_state(dc);
4795 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4796 break;
4797 case 0x1b: /* V9 ldxa */
4798 save_state(dc);
4799 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4800 break;
4801 case 0x2d: /* V9 prefetch, no effect */
4802 goto skip_move;
4803 case 0x30: /* V9 ldfa */
4804 if (gen_trap_ifnofpu(dc)) {
4805 goto jmp_insn;
4806 }
4807 save_state(dc);
4808 gen_ldf_asi(cpu_addr, insn, 4, rd);
4809 gen_update_fprs_dirty(rd);
4810 goto skip_move;
4811 case 0x33: /* V9 lddfa */
4812 if (gen_trap_ifnofpu(dc)) {
4813 goto jmp_insn;
4814 }
4815 save_state(dc);
4816 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4817 gen_update_fprs_dirty(DFPREG(rd));
4818 goto skip_move;
4819 case 0x3d: /* V9 prefetcha, no effect */
4820 goto skip_move;
4821 case 0x32: /* V9 ldqfa */
4822 CHECK_FPU_FEATURE(dc, FLOAT128);
4823 if (gen_trap_ifnofpu(dc)) {
4824 goto jmp_insn;
4825 }
4826 save_state(dc);
4827 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4828 gen_update_fprs_dirty(QFPREG(rd));
4829 goto skip_move;
4830 #endif
4831 default:
4832 goto illegal_insn;
4833 }
4834 gen_store_gpr(dc, rd, cpu_val);
4835 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4836 skip_move: ;
4837 #endif
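/* xop 0x20-0x23 are the floating-point loads; each of them first
   traps if the FPU is disabled. */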
4838 } else if (xop >= 0x20 && xop < 0x24) {
4839 TCGv t0;
4841 if (gen_trap_ifnofpu(dc)) {
4842 goto jmp_insn;
4843 }
4844 save_state(dc);
4845 switch (xop) {
4846 case 0x20: /* ldf, load fpreg */
4847 gen_address_mask(dc, cpu_addr);
4848 t0 = get_temp_tl(dc);
4849 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4850 cpu_dst_32 = gen_dest_fpr_F(dc);
4851 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4852 gen_store_fpr_F(dc, rd, cpu_dst_32);
4853 break;
4854 case 0x21: /* ldfsr, V9 ldxfsr */
4855 #ifdef TARGET_SPARC64
4856 gen_address_mask(dc, cpu_addr);
4857 if (rd == 1) {
4858 TCGv_i64 t64 = tcg_temp_new_i64();
4859 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4860 gen_helper_ldxfsr(cpu_env, t64);
4861 tcg_temp_free_i64(t64);
4862 break;
4863 }
4864 #endif
4865 cpu_dst_32 = get_temp_i32(dc);
4866 t0 = get_temp_tl(dc);
4867 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4868 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4869 gen_helper_ldfsr(cpu_env, cpu_dst_32);
4870 break;
4871 case 0x22: /* ldqf, load quad fpreg */
4872 {
4873 TCGv_i32 r_const;
4875 CHECK_FPU_FEATURE(dc, FLOAT128);
4876 r_const = tcg_const_i32(dc->mem_idx);
4877 gen_address_mask(dc, cpu_addr);
4878 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4879 tcg_temp_free_i32(r_const);
4880 gen_op_store_QT0_fpr(QFPREG(rd));
4881 gen_update_fprs_dirty(QFPREG(rd));
4882 }
4883 break;
4884 case 0x23: /* lddf, load double fpreg */
4885 gen_address_mask(dc, cpu_addr);
4886 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4887 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4888 gen_store_fpr_D(dc, rd, cpu_dst_64);
4889 break;
4890 default:
4891 goto illegal_insn;
4892 }
4893 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4894 xop == 0xe || xop == 0x1e) {
4895 TCGv cpu_val = gen_load_gpr(dc, rd);
4897 switch (xop) {
4898 case 0x4: /* st, store word */
4899 gen_address_mask(dc, cpu_addr);
4900 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4901 break;
4902 case 0x5: /* stb, store byte */
4903 gen_address_mask(dc, cpu_addr);
4904 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4905 break;
4906 case 0x6: /* sth, store halfword */
4907 gen_address_mask(dc, cpu_addr);
4908 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4909 break;
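/* std mirrors ldd: the stored 64-bit value is rd+1 in the low word
   and the (even) rd in the high word, assembled below with
   tcg_gen_concat_tl_i64(t64, lo, cpu_val). */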
4910 case 0x7: /* std, store double word */
4911 if (rd & 1)
4912 goto illegal_insn;
4913 else {
4914 TCGv_i32 r_const;
4915 TCGv_i64 t64;
4916 TCGv lo;
4918 save_state(dc);
4919 gen_address_mask(dc, cpu_addr);
4920 r_const = tcg_const_i32(7);
4921 /* XXX remove alignment check */
4922 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4923 tcg_temp_free_i32(r_const);
4924 lo = gen_load_gpr(dc, rd + 1);
4926 t64 = tcg_temp_new_i64();
4927 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4928 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4929 tcg_temp_free_i64(t64);
4930 }
4931 break;
4932 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4933 case 0x14: /* sta, V9 stwa, store word alternate */
4934 #ifndef TARGET_SPARC64
4935 if (IS_IMM)
4936 goto illegal_insn;
4937 if (!supervisor(dc))
4938 goto priv_insn;
4939 #endif
4940 save_state(dc);
4941 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4942 dc->npc = DYNAMIC_PC;
4943 break;
4944 case 0x15: /* stba, store byte alternate */
4945 #ifndef TARGET_SPARC64
4946 if (IS_IMM)
4947 goto illegal_insn;
4948 if (!supervisor(dc))
4949 goto priv_insn;
4950 #endif
4951 save_state(dc);
4952 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4953 dc->npc = DYNAMIC_PC;
4954 break;
4955 case 0x16: /* stha, store halfword alternate */
4956 #ifndef TARGET_SPARC64
4957 if (IS_IMM)
4958 goto illegal_insn;
4959 if (!supervisor(dc))
4960 goto priv_insn;
4961 #endif
4962 save_state(dc);
4963 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4964 dc->npc = DYNAMIC_PC;
4965 break;
4966 case 0x17: /* stda, store double word alternate */
4967 #ifndef TARGET_SPARC64
4968 if (IS_IMM)
4969 goto illegal_insn;
4970 if (!supervisor(dc))
4971 goto priv_insn;
4972 #endif
4973 if (rd & 1)
4974 goto illegal_insn;
4975 else {
4976 save_state(dc);
4977 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4978 }
4979 break;
4980 #endif
4981 #ifdef TARGET_SPARC64
4982 case 0x0e: /* V9 stx */
4983 gen_address_mask(dc, cpu_addr);
4984 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4985 break;
4986 case 0x1e: /* V9 stxa */
4987 save_state(dc);
4988 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4989 dc->npc = DYNAMIC_PC;
4990 break;
4991 #endif
4992 default:
4993 goto illegal_insn;
4994 }
4995 } else if (xop > 0x23 && xop < 0x28) {
4996 if (gen_trap_ifnofpu(dc)) {
4997 goto jmp_insn;
4998 }
4999 save_state(dc);
5000 switch (xop) {
5001 case 0x24: /* stf, store fpreg */
5002 {
5003 TCGv t = get_temp_tl(dc);
5004 gen_address_mask(dc, cpu_addr);
5005 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5006 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5007 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5008 }
5009 break;
5010 case 0x25: /* stfsr, V9 stxfsr */
5011 {
5012 TCGv t = get_temp_tl(dc);
5014 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5015 #ifdef TARGET_SPARC64
5016 gen_address_mask(dc, cpu_addr);
5017 if (rd == 1) {
5018 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5019 break;
5020 }
5021 #endif
5022 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5023 }
5024 break;
5025 case 0x26:
5026 #ifdef TARGET_SPARC64
5027 /* V9 stqf, store quad fpreg */
5028 {
5029 TCGv_i32 r_const;
5031 CHECK_FPU_FEATURE(dc, FLOAT128);
5032 gen_op_load_fpr_QT0(QFPREG(rd));
5033 r_const = tcg_const_i32(dc->mem_idx);
5034 gen_address_mask(dc, cpu_addr);
5035 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5036 tcg_temp_free_i32(r_const);
5037 }
5038 break;
5039 #else /* !TARGET_SPARC64 */
5040 /* stdfq, store floating point queue */
5041 #if defined(CONFIG_USER_ONLY)
5042 goto illegal_insn;
5043 #else
5044 if (!supervisor(dc))
5045 goto priv_insn;
5046 if (gen_trap_ifnofpu(dc)) {
5047 goto jmp_insn;
5048 }
5049 goto nfq_insn;
5050 #endif
5051 #endif
5052 case 0x27: /* stdf, store double fpreg */
5053 gen_address_mask(dc, cpu_addr);
5054 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5055 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5056 break;
5057 default:
5058 goto illegal_insn;
5059 }
5060 } else if (xop > 0x33 && xop < 0x3f) {
5061 save_state(dc);
5062 switch (xop) {
5063 #ifdef TARGET_SPARC64
5064 case 0x34: /* V9 stfa */
5065 if (gen_trap_ifnofpu(dc)) {
5066 goto jmp_insn;
5067 }
5068 gen_stf_asi(cpu_addr, insn, 4, rd);
5069 break;
5070 case 0x36: /* V9 stqfa */
5071 {
5072 TCGv_i32 r_const;
5074 CHECK_FPU_FEATURE(dc, FLOAT128);
5075 if (gen_trap_ifnofpu(dc)) {
5076 goto jmp_insn;
5077 }
5078 r_const = tcg_const_i32(7);
5079 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5080 tcg_temp_free_i32(r_const);
5081 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5082 }
5083 break;
5084 case 0x37: /* V9 stdfa */
5085 if (gen_trap_ifnofpu(dc)) {
5086 goto jmp_insn;
5087 }
5088 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5089 break;
5090 case 0x3e: /* V9 casxa */
5091 rs2 = GET_FIELD(insn, 27, 31);
5092 cpu_src2 = gen_load_gpr(dc, rs2);
5093 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5094 break;
5095 #else
5096 case 0x34: /* stc */
5097 case 0x35: /* stcsr */
5098 case 0x36: /* stdcq */
5099 case 0x37: /* stdc */
5100 goto ncp_insn;
5101 #endif
5102 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
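/* casa/casxa compare-and-swap, in outline:
       tmp = mem[addr];
       if (tmp == r[rs2])
           mem[addr] = r[rd];
       r[rd] = tmp;
   so rd always receives the old memory value.  LEON3 additionally
   exposes casa to user code through ASI 0xa, which is why the
   privilege test below is relaxed for that ASI. */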
5103 case 0x3c: /* V9 or LEON3 casa */
5104 #ifndef TARGET_SPARC64
5105 CHECK_IU_FEATURE(dc, CASA);
5106 if (IS_IMM) {
5107 goto illegal_insn;
5108 }
5109 /* LEON3 allows CASA from user space with ASI 0xa */
5110 if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
5111 goto priv_insn;
5112 }
5113 #endif
5114 rs2 = GET_FIELD(insn, 27, 31);
5115 cpu_src2 = gen_load_gpr(dc, rs2);
5116 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5117 break;
5118 #endif
5119 default:
5120 goto illegal_insn;
5121 }
5122 } else {
5123 goto illegal_insn;
5124 }
5125 }
5126 break;
5127 }
5128 /* default case for non jump instructions */
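/* The (pc, npc) pair models SPARC's delay slot: an ordinary insn
   just slides the window forward (pc = npc, npc += 4).  JUMP_PC
   means npc still depends on a run-time condition, so the two
   candidates in jump_pc[] are resolved with a branch and the TB
   ends. */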
5129 if (dc->npc == DYNAMIC_PC) {
5130 dc->pc = DYNAMIC_PC;
5131 gen_op_next_insn();
5132 } else if (dc->npc == JUMP_PC) {
5133 /* we can do a static jump */
5134 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5135 dc->is_br = 1;
5136 } else {
5137 dc->pc = dc->npc;
5138 dc->npc = dc->npc + 4;
5139 }
5140 jmp_insn:
5141 goto egress;
5142 illegal_insn:
5143 {
5144 TCGv_i32 r_const;
5146 save_state(dc);
5147 r_const = tcg_const_i32(TT_ILL_INSN);
5148 gen_helper_raise_exception(cpu_env, r_const);
5149 tcg_temp_free_i32(r_const);
5150 dc->is_br = 1;
5151 }
5152 goto egress;
5153 unimp_flush:
5154 {
5155 TCGv_i32 r_const;
5157 save_state(dc);
5158 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5159 gen_helper_raise_exception(cpu_env, r_const);
5160 tcg_temp_free_i32(r_const);
5161 dc->is_br = 1;
5162 }
5163 goto egress;
5164 #if !defined(CONFIG_USER_ONLY)
5165 priv_insn:
5166 {
5167 TCGv_i32 r_const;
5169 save_state(dc);
5170 r_const = tcg_const_i32(TT_PRIV_INSN);
5171 gen_helper_raise_exception(cpu_env, r_const);
5172 tcg_temp_free_i32(r_const);
5173 dc->is_br = 1;
5174 }
5175 goto egress;
5176 #endif
5177 nfpu_insn:
5178 save_state(dc);
5179 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5180 dc->is_br = 1;
5181 goto egress;
5182 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5183 nfq_insn:
5184 save_state(dc);
5185 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5186 dc->is_br = 1;
5187 goto egress;
5188 #endif
5189 #ifndef TARGET_SPARC64
5190 ncp_insn:
5191 {
5192 TCGv_i32 r_const;
5194 save_state(dc);
5195 r_const = tcg_const_i32(TT_NCP_INSN);
5196 gen_helper_raise_exception(cpu_env, r_const);
5197 tcg_temp_free_i32(r_const);
5198 dc->is_br = 1;
5199 }
5200 goto egress;
5201 #endif
5202 egress:
5203 if (dc->n_t32 != 0) {
5204 int i;
5205 for (i = dc->n_t32 - 1; i >= 0; --i) {
5206 tcg_temp_free_i32(dc->t32[i]);
5207 }
5208 dc->n_t32 = 0;
5209 }
5210 if (dc->n_ttl != 0) {
5211 int i;
5212 for (i = dc->n_ttl - 1; i >= 0; --i) {
5213 tcg_temp_free(dc->ttl[i]);
5214 }
5215 dc->n_ttl = 0;
5216 }
5217 }
5219 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5220 {
5221 SPARCCPU *cpu = sparc_env_get_cpu(env);
5222 CPUState *cs = CPU(cpu);
5223 target_ulong pc_start, last_pc;
5224 DisasContext dc1, *dc = &dc1;
5225 int num_insns;
5226 int max_insns;
5227 unsigned int insn;
5229 memset(dc, 0, sizeof(DisasContext));
5230 dc->tb = tb;
5231 pc_start = tb->pc;
5232 dc->pc = pc_start;
5233 last_pc = dc->pc;
5234 dc->npc = (target_ulong) tb->cs_base;
5235 dc->cc_op = CC_OP_DYNAMIC;
5236 dc->mem_idx = cpu_mmu_index(env, false);
5237 dc->def = env->def;
5238 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5239 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5240 dc->singlestep = (cs->singlestep_enabled || singlestep);
5242 num_insns = 0;
5243 max_insns = tb->cflags & CF_COUNT_MASK;
5244 if (max_insns == 0) {
5245 max_insns = CF_COUNT_MASK;
5246 }
5247 if (max_insns > TCG_MAX_INSNS) {
5248 max_insns = TCG_MAX_INSNS;
5249 }
5251 gen_tb_start(tb);
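/* Translation loop: every iteration records the current (pc, npc)
   pair via tcg_gen_insn_start() -- with the conditional target
   or'ed with JUMP_PC when npc is not yet known -- and translates
   one insn.  Generation stops at TB-ending insns, at a page
   boundary (so a TT_TFAULT's PC stays in the right page), in
   singlestep mode, or when the insn/op-buffer budget runs out. */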
5252 do {
5253 if (dc->npc & JUMP_PC) {
5254 assert(dc->jump_pc[1] == dc->pc + 4);
5255 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5256 } else {
5257 tcg_gen_insn_start(dc->pc, dc->npc);
5258 }
5259 num_insns++;
5260 last_pc = dc->pc;
5262 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5263 if (dc->pc != pc_start) {
5264 save_state(dc);
5265 }
5266 gen_helper_debug(cpu_env);
5267 tcg_gen_exit_tb(0);
5268 dc->is_br = 1;
5269 goto exit_gen_loop;
5270 }
5272 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5273 gen_io_start();
5274 }
5276 insn = cpu_ldl_code(env, dc->pc);
5278 disas_sparc_insn(dc, insn);
5280 if (dc->is_br)
5281 break;
5282 /* if the next PC is different, we abort now */
5283 if (dc->pc != (last_pc + 4))
5284 break;
5285 /* if we reach a page boundary, we stop generation so that the
5286 PC of a TT_TFAULT exception is always in the right page */
5287 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5288 break;
5289 /* if single step mode, we generate only one instruction and
5290 generate an exception */
5291 if (dc->singlestep) {
5292 break;
5293 }
5294 } while (!tcg_op_buf_full() &&
5295 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5296 num_insns < max_insns);
5298 exit_gen_loop:
5299 if (tb->cflags & CF_LAST_IO) {
5300 gen_io_end();
5301 }
5302 if (!dc->is_br) {
5303 if (dc->pc != DYNAMIC_PC &&
5304 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5305 /* static PC and NPC: we can use direct chaining */
5306 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5307 } else {
5308 if (dc->pc != DYNAMIC_PC) {
5309 tcg_gen_movi_tl(cpu_pc, dc->pc);
5310 }
5311 save_npc(dc);
5312 tcg_gen_exit_tb(0);
5313 }
5314 }
5315 gen_tb_end(tb, num_insns);
5317 tb->size = last_pc + 4 - pc_start;
5318 tb->icount = num_insns;
5320 #ifdef DEBUG_DISAS
5321 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5322 qemu_log("--------------\n");
5323 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5324 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5325 qemu_log("\n");
5327 #endif
5330 void gen_intermediate_code_init(CPUSPARCState *env)
5332 unsigned int i;
5333 static int inited;
5334 static const char * const gregnames[8] = {
5335 NULL, // g0 not used
5336 "g1",
5337 "g2",
5338 "g3",
5339 "g4",
5340 "g5",
5341 "g6",
5342 "g7",
5344 static const char * const fregnames[32] = {
5345 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5346 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5347 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5348 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5351 /* init various static tables */
5352 if (!inited) {
5353 inited = 1;
5355 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5356 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5357 offsetof(CPUSPARCState, regwptr),
5358 "regwptr");
5359 #ifdef TARGET_SPARC64
5360 cpu_xcc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSPARCState, xcc),
5361 "xcc");
5362 cpu_asi = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSPARCState, asi),
5363 "asi");
5364 cpu_fprs = tcg_global_mem_new_i32(cpu_env,
5365 offsetof(CPUSPARCState, fprs),
5366 "fprs");
5367 cpu_gsr = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, gsr),
5368 "gsr");
5369 cpu_tick_cmpr = tcg_global_mem_new(cpu_env,
5370 offsetof(CPUSPARCState, tick_cmpr),
5371 "tick_cmpr");
5372 cpu_stick_cmpr = tcg_global_mem_new(cpu_env,
5373 offsetof(CPUSPARCState, stick_cmpr),
5374 "stick_cmpr");
5375 cpu_hstick_cmpr = tcg_global_mem_new(cpu_env,
5376 offsetof(CPUSPARCState, hstick_cmpr),
5377 "hstick_cmpr");
5378 cpu_hintp = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, hintp),
5379 "hintp");
5380 cpu_htba = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, htba),
5381 "htba");
5382 cpu_hver = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, hver),
5383 "hver");
5384 cpu_ssr = tcg_global_mem_new(cpu_env,
5385 offsetof(CPUSPARCState, ssr), "ssr");
5386 cpu_ver = tcg_global_mem_new(cpu_env,
5387 offsetof(CPUSPARCState, version), "ver");
5388 cpu_softint = tcg_global_mem_new_i32(cpu_env,
5389 offsetof(CPUSPARCState, softint),
5390 "softint");
5391 #else
5392 cpu_wim = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, wim),
5393 "wim");
5394 #endif
5395 cpu_cond = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, cond),
5396 "cond");
5397 cpu_cc_src = tcg_global_mem_new(cpu_env,
5398 offsetof(CPUSPARCState, cc_src),
5399 "cc_src");
5400 cpu_cc_src2 = tcg_global_mem_new(cpu_env,
5401 offsetof(CPUSPARCState, cc_src2),
5402 "cc_src2");
5403 cpu_cc_dst = tcg_global_mem_new(cpu_env,
5404 offsetof(CPUSPARCState, cc_dst),
5405 "cc_dst");
5406 cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
5407 offsetof(CPUSPARCState, cc_op),
5408 "cc_op");
5409 cpu_psr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSPARCState, psr),
5410 "psr");
5411 cpu_fsr = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, fsr),
5412 "fsr");
5413 cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, pc),
5414 "pc");
5415 cpu_npc = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, npc),
5416 "npc");
5417 cpu_y = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, y), "y");
5418 #ifndef CONFIG_USER_ONLY
5419 cpu_tbr = tcg_global_mem_new(cpu_env, offsetof(CPUSPARCState, tbr),
5420 "tbr");
5421 #endif
5422 for (i = 1; i < 8; i++) {
5423 cpu_gregs[i] = tcg_global_mem_new(cpu_env,
5424 offsetof(CPUSPARCState, gregs[i]),
5425 gregnames[i]);
5426 }
5427 for (i = 0; i < TARGET_DPREGS; i++) {
5428 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5429 offsetof(CPUSPARCState, fpr[i]),
5430 fregnames[i]);
5431 }
5432 }
5433 }
5435 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5436 target_ulong *data)
5437 {
5438 target_ulong pc = data[0];
5439 target_ulong npc = data[1];
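/* data[] holds what the translation loop recorded with
   tcg_gen_insn_start(): data[0] is the pc, data[1] the npc, with
   JUMP_PC or'ed in when the next PC was still conditional. */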
5441 env->pc = pc;
5442 if (npc == DYNAMIC_PC) {
5443 /* dynamic NPC: already stored */
5444 } else if (npc & JUMP_PC) {
5445 /* jump PC: use 'cond' and the jump targets of the translation */
5446 if (env->cond) {
5447 env->npc = npc & ~3;
5448 } else {
5449 env->npc = pc + 4;
5450 }
5451 } else {
5452 env->npc = npc;
5453 }
5454 }