block/backup: avoid copying less than full target clusters
[qemu.git] / target-sparc / translate.c
blob00d61ee16af49d5eb38bcedf4e20facd0005897e
1 /*
2 SPARC translation
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "tcg-op.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
32 #include "exec/log.h"
35 #define DEBUG_DISAS
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc;
47 static TCGv cpu_regs[32];
48 static TCGv cpu_y;
49 #ifndef CONFIG_USER_ONLY
50 static TCGv cpu_tbr;
51 #endif
52 static TCGv cpu_cond;
53 #ifdef TARGET_SPARC64
54 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
55 static TCGv cpu_gsr;
56 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
57 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
58 static TCGv_i32 cpu_softint;
59 #else
60 static TCGv cpu_wim;
61 #endif
62 /* Floating point registers */
63 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
65 #include "exec/gen-icount.h"
67 typedef struct DisasContext {
68 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
69 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
70 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
71 int is_br;
72 int mem_idx;
73 int fpu_enabled;
74 int address_mask_32bit;
75 int singlestep;
76 uint32_t cc_op; /* current CC operation */
77 struct TranslationBlock *tb;
78 sparc_def_t *def;
79 TCGv_i32 t32[3];
80 TCGv ttl[5];
81 int n_t32;
82 int n_ttl;
83 } DisasContext;
85 typedef struct {
86 TCGCond cond;
87 bool is_bool;
88 bool g1, g2;
89 TCGv c1, c2;
90 } DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
/* Sign-extend the low LEN bits of X to a full int.
   The shift up is done on an unsigned value to avoid the undefined
   behavior of left-shifting into (or past) the sign bit. */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (int)((unsigned int)x << len) >> len;
}
/* Bit 13 of an instruction word selects the immediate operand form. */
#define IS_IMM (insn & (1<<13))
122 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
124 TCGv_i32 t;
125 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
126 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
127 return t;
130 static inline TCGv get_temp_tl(DisasContext *dc)
132 TCGv t;
133 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
134 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
135 return t;
/* Mark the FPRS bank containing register RD as dirty (sparc64 only). */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
145 /* floating point registers moves */
146 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
148 #if TCG_TARGET_REG_BITS == 32
149 if (src & 1) {
150 return TCGV_LOW(cpu_fpr[src / 2]);
151 } else {
152 return TCGV_HIGH(cpu_fpr[src / 2]);
154 #else
155 if (src & 1) {
156 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
157 } else {
158 TCGv_i32 ret = get_temp_i32(dc);
159 TCGv_i64 t = tcg_temp_new_i64();
161 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
162 tcg_gen_extrl_i64_i32(ret, t);
163 tcg_temp_free_i64(t);
165 return ret;
167 #endif
170 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
172 #if TCG_TARGET_REG_BITS == 32
173 if (dst & 1) {
174 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
175 } else {
176 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
178 #else
179 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
180 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
181 (dst & 1 ? 0 : 32), 32);
182 #endif
183 gen_update_fprs_dirty(dst);
186 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
188 return get_temp_i32(dc);
191 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
193 src = DFPREG(src);
194 return cpu_fpr[src / 2];
197 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
199 dst = DFPREG(dst);
200 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
201 gen_update_fprs_dirty(dst);
204 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
206 return cpu_fpr[DFPREG(dst) / 2];
209 static void gen_op_load_fpr_QT0(unsigned int src)
211 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
212 offsetof(CPU_QuadU, ll.upper));
213 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
214 offsetof(CPU_QuadU, ll.lower));
217 static void gen_op_load_fpr_QT1(unsigned int src)
219 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
220 offsetof(CPU_QuadU, ll.upper));
221 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
222 offsetof(CPU_QuadU, ll.lower));
225 static void gen_op_store_QT0_fpr(unsigned int dst)
227 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
228 offsetof(CPU_QuadU, ll.upper));
229 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
230 offsetof(CPU_QuadU, ll.lower));
#ifdef TARGET_SPARC64
/* Copy quad FP register RS to RD (two i64 halves) and mark FPRS dirty. */
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
267 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
269 #ifdef TARGET_SPARC64
270 if (AM_CHECK(dc))
271 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
272 #endif
275 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
277 if (reg > 0) {
278 assert(reg < 32);
279 return cpu_regs[reg];
280 } else {
281 TCGv t = get_temp_tl(dc);
282 tcg_gen_movi_tl(t, 0);
283 return t;
287 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
289 if (reg > 0) {
290 assert(reg < 32);
291 tcg_gen_mov_tl(cpu_regs[reg], v);
295 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
297 if (reg > 0) {
298 assert(reg < 32);
299 return cpu_regs[reg];
300 } else {
301 return get_temp_tl(dc);
305 static inline void gen_goto_tb(DisasContext *s, int tb_num,
306 target_ulong pc, target_ulong npc)
308 TranslationBlock *tb;
310 tb = s->tb;
311 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
312 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
313 !s->singlestep) {
314 /* jump to same page: we can use a direct jump */
315 tcg_gen_goto_tb(tb_num);
316 tcg_gen_movi_tl(cpu_pc, pc);
317 tcg_gen_movi_tl(cpu_npc, npc);
318 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
319 } else {
320 /* jump to another page: currently not optimized */
321 tcg_gen_movi_tl(cpu_pc, pc);
322 tcg_gen_movi_tl(cpu_npc, npc);
323 tcg_gen_exit_tb(0);
327 // XXX suboptimal
328 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
330 tcg_gen_extu_i32_tl(reg, src);
331 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
332 tcg_gen_andi_tl(reg, reg, 0x1);
335 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
337 tcg_gen_extu_i32_tl(reg, src);
338 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
339 tcg_gen_andi_tl(reg, reg, 0x1);
342 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
344 tcg_gen_extu_i32_tl(reg, src);
345 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
346 tcg_gen_andi_tl(reg, reg, 0x1);
349 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
351 tcg_gen_extu_i32_tl(reg, src);
352 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
353 tcg_gen_andi_tl(reg, reg, 0x1);
356 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
358 tcg_gen_mov_tl(cpu_cc_src, src1);
359 tcg_gen_mov_tl(cpu_cc_src2, src2);
360 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
361 tcg_gen_mov_tl(dst, cpu_cc_dst);
364 static TCGv_i32 gen_add32_carry32(void)
366 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
368 /* Carry is computed from a previous add: (dst < src) */
369 #if TARGET_LONG_BITS == 64
370 cc_src1_32 = tcg_temp_new_i32();
371 cc_src2_32 = tcg_temp_new_i32();
372 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
373 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
374 #else
375 cc_src1_32 = cpu_cc_dst;
376 cc_src2_32 = cpu_cc_src;
377 #endif
379 carry_32 = tcg_temp_new_i32();
380 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
382 #if TARGET_LONG_BITS == 64
383 tcg_temp_free_i32(cc_src1_32);
384 tcg_temp_free_i32(cc_src2_32);
385 #endif
387 return carry_32;
390 static TCGv_i32 gen_sub32_carry32(void)
392 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
394 /* Carry is computed from a previous borrow: (src1 < src2) */
395 #if TARGET_LONG_BITS == 64
396 cc_src1_32 = tcg_temp_new_i32();
397 cc_src2_32 = tcg_temp_new_i32();
398 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
399 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
400 #else
401 cc_src1_32 = cpu_cc_src;
402 cc_src2_32 = cpu_cc_src2;
403 #endif
405 carry_32 = tcg_temp_new_i32();
406 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
408 #if TARGET_LONG_BITS == 64
409 tcg_temp_free_i32(cc_src1_32);
410 tcg_temp_free_i32(cc_src2_32);
411 #endif
413 return carry_32;
416 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
417 TCGv src2, int update_cc)
419 TCGv_i32 carry_32;
420 TCGv carry;
422 switch (dc->cc_op) {
423 case CC_OP_DIV:
424 case CC_OP_LOGIC:
425 /* Carry is known to be zero. Fall back to plain ADD. */
426 if (update_cc) {
427 gen_op_add_cc(dst, src1, src2);
428 } else {
429 tcg_gen_add_tl(dst, src1, src2);
431 return;
433 case CC_OP_ADD:
434 case CC_OP_TADD:
435 case CC_OP_TADDTV:
436 if (TARGET_LONG_BITS == 32) {
437 /* We can re-use the host's hardware carry generation by using
438 an ADD2 opcode. We discard the low part of the output.
439 Ideally we'd combine this operation with the add that
440 generated the carry in the first place. */
441 carry = tcg_temp_new();
442 tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
443 tcg_temp_free(carry);
444 goto add_done;
446 carry_32 = gen_add32_carry32();
447 break;
449 case CC_OP_SUB:
450 case CC_OP_TSUB:
451 case CC_OP_TSUBTV:
452 carry_32 = gen_sub32_carry32();
453 break;
455 default:
456 /* We need external help to produce the carry. */
457 carry_32 = tcg_temp_new_i32();
458 gen_helper_compute_C_icc(carry_32, cpu_env);
459 break;
462 #if TARGET_LONG_BITS == 64
463 carry = tcg_temp_new();
464 tcg_gen_extu_i32_i64(carry, carry_32);
465 #else
466 carry = carry_32;
467 #endif
469 tcg_gen_add_tl(dst, src1, src2);
470 tcg_gen_add_tl(dst, dst, carry);
472 tcg_temp_free_i32(carry_32);
473 #if TARGET_LONG_BITS == 64
474 tcg_temp_free(carry);
475 #endif
477 add_done:
478 if (update_cc) {
479 tcg_gen_mov_tl(cpu_cc_src, src1);
480 tcg_gen_mov_tl(cpu_cc_src2, src2);
481 tcg_gen_mov_tl(cpu_cc_dst, dst);
482 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
483 dc->cc_op = CC_OP_ADDX;
487 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
489 tcg_gen_mov_tl(cpu_cc_src, src1);
490 tcg_gen_mov_tl(cpu_cc_src2, src2);
491 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
492 tcg_gen_mov_tl(dst, cpu_cc_dst);
495 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
496 TCGv src2, int update_cc)
498 TCGv_i32 carry_32;
499 TCGv carry;
501 switch (dc->cc_op) {
502 case CC_OP_DIV:
503 case CC_OP_LOGIC:
504 /* Carry is known to be zero. Fall back to plain SUB. */
505 if (update_cc) {
506 gen_op_sub_cc(dst, src1, src2);
507 } else {
508 tcg_gen_sub_tl(dst, src1, src2);
510 return;
512 case CC_OP_ADD:
513 case CC_OP_TADD:
514 case CC_OP_TADDTV:
515 carry_32 = gen_add32_carry32();
516 break;
518 case CC_OP_SUB:
519 case CC_OP_TSUB:
520 case CC_OP_TSUBTV:
521 if (TARGET_LONG_BITS == 32) {
522 /* We can re-use the host's hardware carry generation by using
523 a SUB2 opcode. We discard the low part of the output.
524 Ideally we'd combine this operation with the add that
525 generated the carry in the first place. */
526 carry = tcg_temp_new();
527 tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
528 tcg_temp_free(carry);
529 goto sub_done;
531 carry_32 = gen_sub32_carry32();
532 break;
534 default:
535 /* We need external help to produce the carry. */
536 carry_32 = tcg_temp_new_i32();
537 gen_helper_compute_C_icc(carry_32, cpu_env);
538 break;
541 #if TARGET_LONG_BITS == 64
542 carry = tcg_temp_new();
543 tcg_gen_extu_i32_i64(carry, carry_32);
544 #else
545 carry = carry_32;
546 #endif
548 tcg_gen_sub_tl(dst, src1, src2);
549 tcg_gen_sub_tl(dst, dst, carry);
551 tcg_temp_free_i32(carry_32);
552 #if TARGET_LONG_BITS == 64
553 tcg_temp_free(carry);
554 #endif
556 sub_done:
557 if (update_cc) {
558 tcg_gen_mov_tl(cpu_cc_src, src1);
559 tcg_gen_mov_tl(cpu_cc_src2, src2);
560 tcg_gen_mov_tl(cpu_cc_dst, dst);
561 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
562 dc->cc_op = CC_OP_SUBX;
566 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
568 TCGv r_temp, zero, t0;
570 r_temp = tcg_temp_new();
571 t0 = tcg_temp_new();
573 /* old op:
574 if (!(env->y & 1))
575 T1 = 0;
577 zero = tcg_const_tl(0);
578 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
579 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
580 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
581 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
582 zero, cpu_cc_src2);
583 tcg_temp_free(zero);
585 // b2 = T0 & 1;
586 // env->y = (b2 << 31) | (env->y >> 1);
587 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
588 tcg_gen_shli_tl(r_temp, r_temp, 31);
589 tcg_gen_shri_tl(t0, cpu_y, 1);
590 tcg_gen_andi_tl(t0, t0, 0x7fffffff);
591 tcg_gen_or_tl(t0, t0, r_temp);
592 tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
594 // b1 = N ^ V;
595 gen_mov_reg_N(t0, cpu_psr);
596 gen_mov_reg_V(r_temp, cpu_psr);
597 tcg_gen_xor_tl(t0, t0, r_temp);
598 tcg_temp_free(r_temp);
600 // T0 = (b1 << 31) | (T0 >> 1);
601 // src1 = T0;
602 tcg_gen_shli_tl(t0, t0, 31);
603 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
604 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
605 tcg_temp_free(t0);
607 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
609 tcg_gen_mov_tl(dst, cpu_cc_dst);
612 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
614 #if TARGET_LONG_BITS == 32
615 if (sign_ext) {
616 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
617 } else {
618 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
620 #else
621 TCGv t0 = tcg_temp_new_i64();
622 TCGv t1 = tcg_temp_new_i64();
624 if (sign_ext) {
625 tcg_gen_ext32s_i64(t0, src1);
626 tcg_gen_ext32s_i64(t1, src2);
627 } else {
628 tcg_gen_ext32u_i64(t0, src1);
629 tcg_gen_ext32u_i64(t1, src2);
632 tcg_gen_mul_i64(dst, t0, t1);
633 tcg_temp_free(t0);
634 tcg_temp_free(t1);
636 tcg_gen_shri_i64(cpu_y, dst, 32);
637 #endif
640 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
642 /* zero-extend truncated operands before multiplication */
643 gen_op_multiply(dst, src1, src2, 0);
646 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
648 /* sign-extend truncated operands before multiplication */
649 gen_op_multiply(dst, src1, src2, 1);
652 // 1
653 static inline void gen_op_eval_ba(TCGv dst)
655 tcg_gen_movi_tl(dst, 1);
658 // Z
659 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
661 gen_mov_reg_Z(dst, src);
664 // Z | (N ^ V)
665 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
667 TCGv t0 = tcg_temp_new();
668 gen_mov_reg_N(t0, src);
669 gen_mov_reg_V(dst, src);
670 tcg_gen_xor_tl(dst, dst, t0);
671 gen_mov_reg_Z(t0, src);
672 tcg_gen_or_tl(dst, dst, t0);
673 tcg_temp_free(t0);
676 // N ^ V
677 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
679 TCGv t0 = tcg_temp_new();
680 gen_mov_reg_V(t0, src);
681 gen_mov_reg_N(dst, src);
682 tcg_gen_xor_tl(dst, dst, t0);
683 tcg_temp_free(t0);
686 // C | Z
687 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
689 TCGv t0 = tcg_temp_new();
690 gen_mov_reg_Z(t0, src);
691 gen_mov_reg_C(dst, src);
692 tcg_gen_or_tl(dst, dst, t0);
693 tcg_temp_free(t0);
696 // C
697 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
699 gen_mov_reg_C(dst, src);
702 // V
703 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
705 gen_mov_reg_V(dst, src);
708 // 0
709 static inline void gen_op_eval_bn(TCGv dst)
711 tcg_gen_movi_tl(dst, 0);
714 // N
715 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
717 gen_mov_reg_N(dst, src);
720 // !Z
721 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
723 gen_mov_reg_Z(dst, src);
724 tcg_gen_xori_tl(dst, dst, 0x1);
727 // !(Z | (N ^ V))
728 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
730 gen_op_eval_ble(dst, src);
731 tcg_gen_xori_tl(dst, dst, 0x1);
734 // !(N ^ V)
735 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
737 gen_op_eval_bl(dst, src);
738 tcg_gen_xori_tl(dst, dst, 0x1);
741 // !(C | Z)
742 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
744 gen_op_eval_bleu(dst, src);
745 tcg_gen_xori_tl(dst, dst, 0x1);
748 // !C
749 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
751 gen_mov_reg_C(dst, src);
752 tcg_gen_xori_tl(dst, dst, 0x1);
755 // !N
756 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
758 gen_mov_reg_N(dst, src);
759 tcg_gen_xori_tl(dst, dst, 0x1);
762 // !V
763 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
765 gen_mov_reg_V(dst, src);
766 tcg_gen_xori_tl(dst, dst, 0x1);
770 FPSR bit field FCC1 | FCC0:
774 3 unordered
776 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
777 unsigned int fcc_offset)
779 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
780 tcg_gen_andi_tl(reg, reg, 0x1);
783 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
784 unsigned int fcc_offset)
786 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
787 tcg_gen_andi_tl(reg, reg, 0x1);
790 // !0: FCC0 | FCC1
791 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
792 unsigned int fcc_offset)
794 TCGv t0 = tcg_temp_new();
795 gen_mov_reg_FCC0(dst, src, fcc_offset);
796 gen_mov_reg_FCC1(t0, src, fcc_offset);
797 tcg_gen_or_tl(dst, dst, t0);
798 tcg_temp_free(t0);
801 // 1 or 2: FCC0 ^ FCC1
802 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
803 unsigned int fcc_offset)
805 TCGv t0 = tcg_temp_new();
806 gen_mov_reg_FCC0(dst, src, fcc_offset);
807 gen_mov_reg_FCC1(t0, src, fcc_offset);
808 tcg_gen_xor_tl(dst, dst, t0);
809 tcg_temp_free(t0);
812 // 1 or 3: FCC0
813 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
814 unsigned int fcc_offset)
816 gen_mov_reg_FCC0(dst, src, fcc_offset);
819 // 1: FCC0 & !FCC1
820 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
821 unsigned int fcc_offset)
823 TCGv t0 = tcg_temp_new();
824 gen_mov_reg_FCC0(dst, src, fcc_offset);
825 gen_mov_reg_FCC1(t0, src, fcc_offset);
826 tcg_gen_andc_tl(dst, dst, t0);
827 tcg_temp_free(t0);
830 // 2 or 3: FCC1
831 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
832 unsigned int fcc_offset)
834 gen_mov_reg_FCC1(dst, src, fcc_offset);
837 // 2: !FCC0 & FCC1
838 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
839 unsigned int fcc_offset)
841 TCGv t0 = tcg_temp_new();
842 gen_mov_reg_FCC0(dst, src, fcc_offset);
843 gen_mov_reg_FCC1(t0, src, fcc_offset);
844 tcg_gen_andc_tl(dst, t0, dst);
845 tcg_temp_free(t0);
848 // 3: FCC0 & FCC1
849 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
850 unsigned int fcc_offset)
852 TCGv t0 = tcg_temp_new();
853 gen_mov_reg_FCC0(dst, src, fcc_offset);
854 gen_mov_reg_FCC1(t0, src, fcc_offset);
855 tcg_gen_and_tl(dst, dst, t0);
856 tcg_temp_free(t0);
859 // 0: !(FCC0 | FCC1)
860 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
861 unsigned int fcc_offset)
863 TCGv t0 = tcg_temp_new();
864 gen_mov_reg_FCC0(dst, src, fcc_offset);
865 gen_mov_reg_FCC1(t0, src, fcc_offset);
866 tcg_gen_or_tl(dst, dst, t0);
867 tcg_gen_xori_tl(dst, dst, 0x1);
868 tcg_temp_free(t0);
871 // 0 or 3: !(FCC0 ^ FCC1)
872 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
873 unsigned int fcc_offset)
875 TCGv t0 = tcg_temp_new();
876 gen_mov_reg_FCC0(dst, src, fcc_offset);
877 gen_mov_reg_FCC1(t0, src, fcc_offset);
878 tcg_gen_xor_tl(dst, dst, t0);
879 tcg_gen_xori_tl(dst, dst, 0x1);
880 tcg_temp_free(t0);
883 // 0 or 2: !FCC0
884 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
885 unsigned int fcc_offset)
887 gen_mov_reg_FCC0(dst, src, fcc_offset);
888 tcg_gen_xori_tl(dst, dst, 0x1);
891 // !1: !(FCC0 & !FCC1)
892 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
893 unsigned int fcc_offset)
895 TCGv t0 = tcg_temp_new();
896 gen_mov_reg_FCC0(dst, src, fcc_offset);
897 gen_mov_reg_FCC1(t0, src, fcc_offset);
898 tcg_gen_andc_tl(dst, dst, t0);
899 tcg_gen_xori_tl(dst, dst, 0x1);
900 tcg_temp_free(t0);
903 // 0 or 1: !FCC1
904 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
905 unsigned int fcc_offset)
907 gen_mov_reg_FCC1(dst, src, fcc_offset);
908 tcg_gen_xori_tl(dst, dst, 0x1);
911 // !2: !(!FCC0 & FCC1)
912 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
913 unsigned int fcc_offset)
915 TCGv t0 = tcg_temp_new();
916 gen_mov_reg_FCC0(dst, src, fcc_offset);
917 gen_mov_reg_FCC1(t0, src, fcc_offset);
918 tcg_gen_andc_tl(dst, t0, dst);
919 tcg_gen_xori_tl(dst, dst, 0x1);
920 tcg_temp_free(t0);
923 // !3: !(FCC0 & FCC1)
924 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
925 unsigned int fcc_offset)
927 TCGv t0 = tcg_temp_new();
928 gen_mov_reg_FCC0(dst, src, fcc_offset);
929 gen_mov_reg_FCC1(t0, src, fcc_offset);
930 tcg_gen_and_tl(dst, dst, t0);
931 tcg_gen_xori_tl(dst, dst, 0x1);
932 tcg_temp_free(t0);
935 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
936 target_ulong pc2, TCGv r_cond)
938 TCGLabel *l1 = gen_new_label();
940 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
942 gen_goto_tb(dc, 0, pc1, pc1 + 4);
944 gen_set_label(l1);
945 gen_goto_tb(dc, 1, pc2, pc2 + 4);
948 static void gen_branch_a(DisasContext *dc, target_ulong pc1)
950 TCGLabel *l1 = gen_new_label();
951 target_ulong npc = dc->npc;
953 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
955 gen_goto_tb(dc, 0, npc, pc1);
957 gen_set_label(l1);
958 gen_goto_tb(dc, 1, npc + 4, npc + 8);
960 dc->is_br = 1;
963 static void gen_branch_n(DisasContext *dc, target_ulong pc1)
965 target_ulong npc = dc->npc;
967 if (likely(npc != DYNAMIC_PC)) {
968 dc->pc = npc;
969 dc->jump_pc[0] = pc1;
970 dc->jump_pc[1] = npc + 4;
971 dc->npc = JUMP_PC;
972 } else {
973 TCGv t, z;
975 tcg_gen_mov_tl(cpu_pc, cpu_npc);
977 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
978 t = tcg_const_tl(pc1);
979 z = tcg_const_tl(0);
980 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
981 tcg_temp_free(t);
982 tcg_temp_free(z);
984 dc->pc = DYNAMIC_PC;
988 static inline void gen_generic_branch(DisasContext *dc)
990 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
991 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
992 TCGv zero = tcg_const_tl(0);
994 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
996 tcg_temp_free(npc0);
997 tcg_temp_free(npc1);
998 tcg_temp_free(zero);
1001 /* call this function before using the condition register as it may
1002 have been set for a jump */
1003 static inline void flush_cond(DisasContext *dc)
1005 if (dc->npc == JUMP_PC) {
1006 gen_generic_branch(dc);
1007 dc->npc = DYNAMIC_PC;
1011 static inline void save_npc(DisasContext *dc)
1013 if (dc->npc == JUMP_PC) {
1014 gen_generic_branch(dc);
1015 dc->npc = DYNAMIC_PC;
1016 } else if (dc->npc != DYNAMIC_PC) {
1017 tcg_gen_movi_tl(cpu_npc, dc->npc);
1021 static inline void update_psr(DisasContext *dc)
1023 if (dc->cc_op != CC_OP_FLAGS) {
1024 dc->cc_op = CC_OP_FLAGS;
1025 gen_helper_compute_psr(cpu_env);
1029 static inline void save_state(DisasContext *dc)
1031 tcg_gen_movi_tl(cpu_pc, dc->pc);
1032 save_npc(dc);
1035 static inline void gen_mov_pc_npc(DisasContext *dc)
1037 if (dc->npc == JUMP_PC) {
1038 gen_generic_branch(dc);
1039 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1040 dc->pc = DYNAMIC_PC;
1041 } else if (dc->npc == DYNAMIC_PC) {
1042 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1043 dc->pc = DYNAMIC_PC;
1044 } else {
1045 dc->pc = dc->npc;
1049 static inline void gen_op_next_insn(void)
1051 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1052 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1055 static void free_compare(DisasCompare *cmp)
1057 if (!cmp->g1) {
1058 tcg_temp_free(cmp->c1);
1060 if (!cmp->g2) {
1061 tcg_temp_free(cmp->c2);
1065 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1066 DisasContext *dc)
1068 static int subcc_cond[16] = {
1069 TCG_COND_NEVER,
1070 TCG_COND_EQ,
1071 TCG_COND_LE,
1072 TCG_COND_LT,
1073 TCG_COND_LEU,
1074 TCG_COND_LTU,
1075 -1, /* neg */
1076 -1, /* overflow */
1077 TCG_COND_ALWAYS,
1078 TCG_COND_NE,
1079 TCG_COND_GT,
1080 TCG_COND_GE,
1081 TCG_COND_GTU,
1082 TCG_COND_GEU,
1083 -1, /* pos */
1084 -1, /* no overflow */
1087 static int logic_cond[16] = {
1088 TCG_COND_NEVER,
1089 TCG_COND_EQ, /* eq: Z */
1090 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1091 TCG_COND_LT, /* lt: N ^ V -> N */
1092 TCG_COND_EQ, /* leu: C | Z -> Z */
1093 TCG_COND_NEVER, /* ltu: C -> 0 */
1094 TCG_COND_LT, /* neg: N */
1095 TCG_COND_NEVER, /* vs: V -> 0 */
1096 TCG_COND_ALWAYS,
1097 TCG_COND_NE, /* ne: !Z */
1098 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1099 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1100 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1101 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1102 TCG_COND_GE, /* pos: !N */
1103 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1106 TCGv_i32 r_src;
1107 TCGv r_dst;
1109 #ifdef TARGET_SPARC64
1110 if (xcc) {
1111 r_src = cpu_xcc;
1112 } else {
1113 r_src = cpu_psr;
1115 #else
1116 r_src = cpu_psr;
1117 #endif
1119 switch (dc->cc_op) {
1120 case CC_OP_LOGIC:
1121 cmp->cond = logic_cond[cond];
1122 do_compare_dst_0:
1123 cmp->is_bool = false;
1124 cmp->g2 = false;
1125 cmp->c2 = tcg_const_tl(0);
1126 #ifdef TARGET_SPARC64
1127 if (!xcc) {
1128 cmp->g1 = false;
1129 cmp->c1 = tcg_temp_new();
1130 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1131 break;
1133 #endif
1134 cmp->g1 = true;
1135 cmp->c1 = cpu_cc_dst;
1136 break;
1138 case CC_OP_SUB:
1139 switch (cond) {
1140 case 6: /* neg */
1141 case 14: /* pos */
1142 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1143 goto do_compare_dst_0;
1145 case 7: /* overflow */
1146 case 15: /* !overflow */
1147 goto do_dynamic;
1149 default:
1150 cmp->cond = subcc_cond[cond];
1151 cmp->is_bool = false;
1152 #ifdef TARGET_SPARC64
1153 if (!xcc) {
1154 /* Note that sign-extension works for unsigned compares as
1155 long as both operands are sign-extended. */
1156 cmp->g1 = cmp->g2 = false;
1157 cmp->c1 = tcg_temp_new();
1158 cmp->c2 = tcg_temp_new();
1159 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1160 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1161 break;
1163 #endif
1164 cmp->g1 = cmp->g2 = true;
1165 cmp->c1 = cpu_cc_src;
1166 cmp->c2 = cpu_cc_src2;
1167 break;
1169 break;
1171 default:
1172 do_dynamic:
1173 gen_helper_compute_psr(cpu_env);
1174 dc->cc_op = CC_OP_FLAGS;
1175 /* FALLTHRU */
1177 case CC_OP_FLAGS:
1178 /* We're going to generate a boolean result. */
1179 cmp->cond = TCG_COND_NE;
1180 cmp->is_bool = true;
1181 cmp->g1 = cmp->g2 = false;
1182 cmp->c1 = r_dst = tcg_temp_new();
1183 cmp->c2 = tcg_const_tl(0);
1185 switch (cond) {
1186 case 0x0:
1187 gen_op_eval_bn(r_dst);
1188 break;
1189 case 0x1:
1190 gen_op_eval_be(r_dst, r_src);
1191 break;
1192 case 0x2:
1193 gen_op_eval_ble(r_dst, r_src);
1194 break;
1195 case 0x3:
1196 gen_op_eval_bl(r_dst, r_src);
1197 break;
1198 case 0x4:
1199 gen_op_eval_bleu(r_dst, r_src);
1200 break;
1201 case 0x5:
1202 gen_op_eval_bcs(r_dst, r_src);
1203 break;
1204 case 0x6:
1205 gen_op_eval_bneg(r_dst, r_src);
1206 break;
1207 case 0x7:
1208 gen_op_eval_bvs(r_dst, r_src);
1209 break;
1210 case 0x8:
1211 gen_op_eval_ba(r_dst);
1212 break;
1213 case 0x9:
1214 gen_op_eval_bne(r_dst, r_src);
1215 break;
1216 case 0xa:
1217 gen_op_eval_bg(r_dst, r_src);
1218 break;
1219 case 0xb:
1220 gen_op_eval_bge(r_dst, r_src);
1221 break;
1222 case 0xc:
1223 gen_op_eval_bgu(r_dst, r_src);
1224 break;
1225 case 0xd:
1226 gen_op_eval_bcc(r_dst, r_src);
1227 break;
1228 case 0xe:
1229 gen_op_eval_bpos(r_dst, r_src);
1230 break;
1231 case 0xf:
1232 gen_op_eval_bvc(r_dst, r_src);
1233 break;
1235 break;
1239 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1241 unsigned int offset;
1242 TCGv r_dst;
1244 /* For now we still generate a straight boolean result. */
1245 cmp->cond = TCG_COND_NE;
1246 cmp->is_bool = true;
1247 cmp->g1 = cmp->g2 = false;
1248 cmp->c1 = r_dst = tcg_temp_new();
1249 cmp->c2 = tcg_const_tl(0);
1251 switch (cc) {
1252 default:
1253 case 0x0:
1254 offset = 0;
1255 break;
1256 case 0x1:
1257 offset = 32 - 10;
1258 break;
1259 case 0x2:
1260 offset = 34 - 10;
1261 break;
1262 case 0x3:
1263 offset = 36 - 10;
1264 break;
1267 switch (cond) {
1268 case 0x0:
1269 gen_op_eval_bn(r_dst);
1270 break;
1271 case 0x1:
1272 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1273 break;
1274 case 0x2:
1275 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1276 break;
1277 case 0x3:
1278 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1279 break;
1280 case 0x4:
1281 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1282 break;
1283 case 0x5:
1284 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1285 break;
1286 case 0x6:
1287 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1288 break;
1289 case 0x7:
1290 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1291 break;
1292 case 0x8:
1293 gen_op_eval_ba(r_dst);
1294 break;
1295 case 0x9:
1296 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1297 break;
1298 case 0xa:
1299 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1300 break;
1301 case 0xb:
1302 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1303 break;
1304 case 0xc:
1305 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1306 break;
1307 case 0xd:
1308 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1309 break;
1310 case 0xe:
1311 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1312 break;
1313 case 0xf:
1314 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1315 break;
/* Evaluate integer condition code (CC, COND) and leave a boolean in R_DST. */
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        /* Comparison already produced a 0/1 value; just copy it.  */
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
/* Evaluate FP condition code (CC, COND) and leave a boolean in R_DST. */
static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
1350 #ifdef TARGET_SPARC64
1351 // Inverted logic
1352 static const int gen_tcg_cond_reg[8] = {
1354 TCG_COND_NE,
1355 TCG_COND_GT,
1356 TCG_COND_GE,
1358 TCG_COND_EQ,
1359 TCG_COND_LE,
1360 TCG_COND_LT,
/* Build a DisasCompare for a register-vs-zero test (V9 BPr/MOVr/FMOVr).
   The table holds the inverted sense, so re-invert it here.  */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;   /* r_src is a global; don't free it */
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}
/* Evaluate register condition COND on R_SRC, leaving a boolean in R_DST. */
static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
1383 #endif
/* Translate an integer conditional branch (Bicc/BPcc).  OFFSET is the
   sign-extended, shifted displacement; CC selects icc (0) or xcc (1).
   The "annul" bit (a) skips the delay slot on the not-taken path.  */
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        /* 32-bit address masking in effect */
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* annulled: skip the delay slot entirely */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* annulled: delay slot not executed, jump directly */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        /* true conditional branch: materialize the condition */
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
/* Translate an FP conditional branch (FBfcc/FBPfcc).  Mirrors do_branch
   but evaluates the selected %fcc field instead of the integer codes.  */
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        /* 32-bit address masking in effect */
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
1465 #ifdef TARGET_SPARC64
/* Translate a V9 branch-on-register-condition (BPr).  Unlike Bicc there
   are no unconditional encodings, so always emit a real conditional.  */
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
/* Single-precision FP compare into %fcc[fccno] (V9 has four fcc fields). */
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* Double-precision FP compare into %fcc[fccno]. */
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* Quad-precision FP compare into %fcc[fccno]; operands come from the
   QT0/QT1 staging slots in env.  */
static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}
/* Single-precision FP compare-with-exception (signals on unordered). */
static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* Double-precision FP compare-with-exception (signals on unordered). */
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}
/* Quad-precision FP compare-with-exception; operands from QT0/QT1. */
static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
1592 #else
/* SPARC32: only one %fcc field exists, so fccno is ignored. */
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}
/* SPARC32 double compare; fccno ignored (single %fcc). */
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}
/* SPARC32 quad compare (operands in QT0/QT1); fccno ignored. */
static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}
/* SPARC32 single compare-with-exception; fccno ignored. */
static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}
/* SPARC32 double compare-with-exception; fccno ignored. */
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}
/* SPARC32 quad compare-with-exception; fccno ignored. */
static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
1623 #endif
/* Set the FSR trap-type field to FSR_FLAGS and raise a TT_FP_EXCP trap. */
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    /* Clear the old ftt bits before installing the new trap type.  */
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
/* If the FPU is disabled (system emulation only), raise TT_NFPU_INSN and
   end the TB.  Returns 1 when a trap was generated, 0 otherwise; callers
   use the result to abort translation of the current insn.  */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;    /* exception ends the translation block */
        return 1;
    }
#endif
    return 0;
}
/* Clear the accumulated IEEE exception bits and trap type in %fsr. */
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
/* Emit a unary single-precision FP op F[rd] = gen(env, F[rs]). */
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
/* As gen_fop_FF, but for ops that cannot raise exceptions (no env arg). */
static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}
/* Emit a binary single-precision FP op F[rd] = gen(env, F[rs1], F[rs2]). */
static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
1698 #ifdef TARGET_SPARC64
/* Binary single-precision op with no exception path (VIS etc.). */
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
1712 #endif
/* Emit a unary double-precision FP op D[rd] = gen(env, D[rs]). */
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1727 #ifdef TARGET_SPARC64
/* Unary double-precision op with no exception path. */
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
1740 #endif
/* Emit a binary double-precision FP op D[rd] = gen(env, D[rs1], D[rs2]). */
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
1756 #ifdef TARGET_SPARC64
/* Binary double-precision op with no exception path (VIS). */
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
/* Binary double-precision VIS op that also reads %gsr (e.g. faligndata). */
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
/* Ternary VIS op: D[rd] = gen(D[rd], D[rs1], D[rs2]); rd is read first
   as src0 so the op may accumulate into the destination register.  */
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
1799 #endif
/* Unary quad-precision op via the QT0/QT1 staging slots in env. */
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1812 #ifdef TARGET_SPARC64
/* Unary quad-precision op with no exception path; same QT0/QT1 plumbing. */
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1823 #endif
/* Binary quad-precision op: QT0 = gen(QT0 = Q[rs1], QT1 = Q[rs2]). */
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
/* Widening op: D[rd] = gen(env, F[rs1], F[rs2]) (e.g. fsmuld). */
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
/* Widening op: Q[rd] (via QT0) = gen(env, D[rs1], D[rs2]) (fdmulq). */
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1866 #ifdef TARGET_SPARC64
/* Conversion op: D[rd] = gen(env, F[rs]). */
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1880 #endif
/* Non-SPARC64 variant of gen_fop_DF (same shape; kept separate for the
   #ifdef structure).  */
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
/* Narrowing op: F[rd] = gen(env, D[rs]). */
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
/* Narrowing op: F[rd] = gen(env) with Q[rs] staged in QT1. */
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
/* Narrowing op: D[rd] = gen(env) with Q[rs] staged in QT1. */
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
/* Widening op: Q[rd] (via QT0) = gen(env, F[rs]); no exception path. */
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
/* Widening op: Q[rd] (via QT0) = gen(env, D[rs]); no exception path. */
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1962 /* asi moves */
1963 #ifdef TARGET_SPARC64
/* Return the ASI for a V9 alternate-space access as a TCGv_i32: the
   current %asi register for the immediate-addressing form, otherwise the
   8-bit ASI field from the instruction.  Caller frees the temp.  */
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        /* "imm" form actually means: use the %asi register */
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
/* Load SIZE bytes (sign-extended if SIGN) from ADDR in the insn's ASI. */
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* Store SIZE bytes of SRC to ADDR in the insn's ASI. */
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* FP load from alternate space; the helper writes directly into f-reg RD. */
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* FP store to alternate space; the helper reads f-reg RD itself. */
static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
/* SWAPA: load the old 32-bit word at ADDR into DST and store SRC there,
   both in the insn's ASI.  (Emitted as separate load+store helpers.)  */
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
/* LDDA (SPARC64): the helper loads the 128/64-bit pair and writes both
   destination registers itself; hi/dc are unused in this variant.  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}
/* STDA (SPARC64): concatenate r[rd+1] (low) with HI and store 8 bytes. */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
/* CASXA: 64-bit compare-and-swap of r[rd] against memory at ADDR. */
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
2087 #elif !defined(CONFIG_USER_ONLY)
/* SPARC32 system-mode ASI load: the ASI is always an immediate field. */
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    /* helper returns a 64-bit value; narrow to the target word size */
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
/* SPARC32 system-mode ASI store; widens SRC to 64 bits for the helper. */
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2120 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2122 TCGv_i32 r_asi, r_size, r_sign;
2123 TCGv_i64 r_val, t64;
2125 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2126 r_size = tcg_const_i32(4);
2127 r_sign = tcg_const_i32(0);
2128 t64 = tcg_temp_new_i64();
2129 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2130 tcg_temp_free(r_sign);
2131 r_val = tcg_temp_new_i64();
2132 tcg_gen_extu_tl_i64(r_val, src);
2133 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2134 tcg_temp_free_i64(r_val);
2135 tcg_temp_free_i32(r_size);
2136 tcg_temp_free_i32(r_asi);
2137 tcg_gen_trunc_i64_tl(dst, t64);
2138 tcg_temp_free_i64(t64);
/* LDDA (SPARC32 system mode): load 8 bytes and split them into the even
   (high word) and odd (low word) registers of the pair.  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    t = gen_dest_gpr(dc, rd | 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd | 1, t);

    /* high 32 bits go to the even register */
    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
/* STDA (SPARC32 system mode): concatenate r[rd+1] (low) with HI and
   store 8 bytes to the immediate ASI.  */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2186 #endif
2188 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
/* CASA: 32-bit compare-and-swap of r[rd] against memory at ADDR.
   ASI comes from %asi/immediate on V9, immediate-only on SPARC32.  */
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
/* LDSTUBA: load the byte at ADDR into DST, then store 0xff there. */
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
2220 #endif
2222 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2224 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2225 return gen_load_gpr(dc, rs1);
2228 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2230 if (IS_IMM) { /* immediate */
2231 target_long simm = GET_FIELDs(insn, 19, 31);
2232 TCGv t = get_temp_tl(dc);
2233 tcg_gen_movi_tl(t, simm);
2234 return t;
2235 } else { /* register */
2236 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2237 return gen_load_gpr(dc, rs2);
2241 #ifdef TARGET_SPARC64
/* FMOVScc: conditionally move single-precision F[rs] into F[rd]. */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);  /* keep old value when cond is false */
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
/* FMOVDcc: conditionally move double-precision D[rs] into D[rd]. */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
/* FMOVQcc: conditionally move a quad register pair (two i64 halves). */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
2293 #ifndef CONFIG_USER_ONLY
/* Compute a pointer to the trap state for the current trap level:
   r_tsptr = &env->ts[env->tl & MAXTL_MASK].  */
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
2318 #endif
/* VIS EDGE instructions: compute a partial-store byte mask in DST from the
   addresses S1 and S2, optionally setting the condition codes from the
   subtraction s1 - s2.  WIDTH is the element size in bits (8/16/32),
   LEFT selects the left-edge vs right-edge variant.  */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* EDGE..CC also sets icc/xcc from s1 - s2 */
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /* compare the two addresses with the low 3 bits masked off */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
/* VIS ALIGNADDR{,L}: DST = (s1 + s2) & ~7, and record the low 3 bits
   (negated for the "little" variant) in the %gsr alignment field.  */
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
/* VIS FALIGNDATA: concatenate S1:S2 and extract 8 bytes starting at the
   byte offset held in GSR.align — i.e. (s1 << (align*8)) | (s2 >> (64 -
   align*8)).  */
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
2461 #endif
/* Bail out of disas_sparc_insn (via its illegal_insn / nfpu_insn labels)
   when the CPU model lacks the given feature bit.  Only usable inside
   disas_sparc_insn.  */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
2470 /* before an instruction, dc->pc must be static */
2471 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2473 unsigned int opc, rs1, rs2, rd;
2474 TCGv cpu_src1, cpu_src2;
2475 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2476 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2477 target_long simm;
2479 opc = GET_FIELD(insn, 0, 1);
2480 rd = GET_FIELD(insn, 2, 6);
2482 switch (opc) {
2483 case 0: /* branches/sethi */
2485 unsigned int xop = GET_FIELD(insn, 7, 9);
2486 int32_t target;
2487 switch (xop) {
2488 #ifdef TARGET_SPARC64
2489 case 0x1: /* V9 BPcc */
2491 int cc;
2493 target = GET_FIELD_SP(insn, 0, 18);
2494 target = sign_extend(target, 19);
2495 target <<= 2;
2496 cc = GET_FIELD_SP(insn, 20, 21);
2497 if (cc == 0)
2498 do_branch(dc, target, insn, 0);
2499 else if (cc == 2)
2500 do_branch(dc, target, insn, 1);
2501 else
2502 goto illegal_insn;
2503 goto jmp_insn;
2505 case 0x3: /* V9 BPr */
2507 target = GET_FIELD_SP(insn, 0, 13) |
2508 (GET_FIELD_SP(insn, 20, 21) << 14);
2509 target = sign_extend(target, 16);
2510 target <<= 2;
2511 cpu_src1 = get_src1(dc, insn);
2512 do_branch_reg(dc, target, insn, cpu_src1);
2513 goto jmp_insn;
2515 case 0x5: /* V9 FBPcc */
2517 int cc = GET_FIELD_SP(insn, 20, 21);
2518 if (gen_trap_ifnofpu(dc)) {
2519 goto jmp_insn;
2521 target = GET_FIELD_SP(insn, 0, 18);
2522 target = sign_extend(target, 19);
2523 target <<= 2;
2524 do_fbranch(dc, target, insn, cc);
2525 goto jmp_insn;
2527 #else
2528 case 0x7: /* CBN+x */
2530 goto ncp_insn;
2532 #endif
2533 case 0x2: /* BN+x */
2535 target = GET_FIELD(insn, 10, 31);
2536 target = sign_extend(target, 22);
2537 target <<= 2;
2538 do_branch(dc, target, insn, 0);
2539 goto jmp_insn;
2541 case 0x6: /* FBN+x */
2543 if (gen_trap_ifnofpu(dc)) {
2544 goto jmp_insn;
2546 target = GET_FIELD(insn, 10, 31);
2547 target = sign_extend(target, 22);
2548 target <<= 2;
2549 do_fbranch(dc, target, insn, 0);
2550 goto jmp_insn;
2552 case 0x4: /* SETHI */
2553 /* Special-case %g0 because that's the canonical nop. */
2554 if (rd) {
2555 uint32_t value = GET_FIELD(insn, 10, 31);
2556 TCGv t = gen_dest_gpr(dc, rd);
2557 tcg_gen_movi_tl(t, value << 10);
2558 gen_store_gpr(dc, rd, t);
2560 break;
2561 case 0x0: /* UNIMPL */
2562 default:
2563 goto illegal_insn;
2565 break;
2567 break;
2568 case 1: /*CALL*/
2570 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2571 TCGv o7 = gen_dest_gpr(dc, 15);
2573 tcg_gen_movi_tl(o7, dc->pc);
2574 gen_store_gpr(dc, 15, o7);
2575 target += dc->pc;
2576 gen_mov_pc_npc(dc);
2577 #ifdef TARGET_SPARC64
2578 if (unlikely(AM_CHECK(dc))) {
2579 target &= 0xffffffffULL;
2581 #endif
2582 dc->npc = target;
2584 goto jmp_insn;
2585 case 2: /* FPU & Logical Operations */
2587 unsigned int xop = GET_FIELD(insn, 7, 12);
2588 TCGv cpu_dst = get_temp_tl(dc);
2589 TCGv cpu_tmp0;
2591 if (xop == 0x3a) { /* generate trap */
2592 int cond = GET_FIELD(insn, 3, 6);
2593 TCGv_i32 trap;
2594 TCGLabel *l1 = NULL;
2595 int mask;
2597 if (cond == 0) {
2598 /* Trap never. */
2599 break;
2602 save_state(dc);
2604 if (cond != 8) {
2605 /* Conditional trap. */
2606 DisasCompare cmp;
2607 #ifdef TARGET_SPARC64
2608 /* V9 icc/xcc */
2609 int cc = GET_FIELD_SP(insn, 11, 12);
2610 if (cc == 0) {
2611 gen_compare(&cmp, 0, cond, dc);
2612 } else if (cc == 2) {
2613 gen_compare(&cmp, 1, cond, dc);
2614 } else {
2615 goto illegal_insn;
2617 #else
2618 gen_compare(&cmp, 0, cond, dc);
2619 #endif
2620 l1 = gen_new_label();
2621 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2622 cmp.c1, cmp.c2, l1);
2623 free_compare(&cmp);
2626 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2627 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2629 /* Don't use the normal temporaries, as they may well have
2630 gone out of scope with the branch above. While we're
2631 doing that we might as well pre-truncate to 32-bit. */
2632 trap = tcg_temp_new_i32();
2634 rs1 = GET_FIELD_SP(insn, 14, 18);
2635 if (IS_IMM) {
2636 rs2 = GET_FIELD_SP(insn, 0, 6);
2637 if (rs1 == 0) {
2638 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2639 /* Signal that the trap value is fully constant. */
2640 mask = 0;
2641 } else {
2642 TCGv t1 = gen_load_gpr(dc, rs1);
2643 tcg_gen_trunc_tl_i32(trap, t1);
2644 tcg_gen_addi_i32(trap, trap, rs2);
2646 } else {
2647 TCGv t1, t2;
2648 rs2 = GET_FIELD_SP(insn, 0, 4);
2649 t1 = gen_load_gpr(dc, rs1);
2650 t2 = gen_load_gpr(dc, rs2);
2651 tcg_gen_add_tl(t1, t1, t2);
2652 tcg_gen_trunc_tl_i32(trap, t1);
2654 if (mask != 0) {
2655 tcg_gen_andi_i32(trap, trap, mask);
2656 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2659 gen_helper_raise_exception(cpu_env, trap);
2660 tcg_temp_free_i32(trap);
2662 if (cond == 8) {
2663 /* An unconditional trap ends the TB. */
2664 dc->is_br = 1;
2665 goto jmp_insn;
2666 } else {
2667 /* A conditional trap falls through to the next insn. */
2668 gen_set_label(l1);
2669 break;
2671 } else if (xop == 0x28) {
2672 rs1 = GET_FIELD(insn, 13, 17);
2673 switch(rs1) {
2674 case 0: /* rdy */
2675 #ifndef TARGET_SPARC64
2676 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2677 manual, rdy on the microSPARC
2678 II */
2679 case 0x0f: /* stbar in the SPARCv8 manual,
2680 rdy on the microSPARC II */
2681 case 0x10 ... 0x1f: /* implementation-dependent in the
2682 SPARCv8 manual, rdy on the
2683 microSPARC II */
2684 /* Read Asr17 */
2685 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2686 TCGv t = gen_dest_gpr(dc, rd);
2687 /* Read Asr17 for a Leon3 monoprocessor */
2688 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2689 gen_store_gpr(dc, rd, t);
2690 break;
2692 #endif
2693 gen_store_gpr(dc, rd, cpu_y);
2694 break;
2695 #ifdef TARGET_SPARC64
2696 case 0x2: /* V9 rdccr */
2697 update_psr(dc);
2698 gen_helper_rdccr(cpu_dst, cpu_env);
2699 gen_store_gpr(dc, rd, cpu_dst);
2700 break;
2701 case 0x3: /* V9 rdasi */
2702 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2703 gen_store_gpr(dc, rd, cpu_dst);
2704 break;
2705 case 0x4: /* V9 rdtick */
2707 TCGv_ptr r_tickptr;
2708 TCGv_i32 r_const;
2710 r_tickptr = tcg_temp_new_ptr();
2711 r_const = tcg_const_i32(dc->mem_idx);
2712 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2713 offsetof(CPUSPARCState, tick));
2714 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2715 r_const);
2716 tcg_temp_free_ptr(r_tickptr);
2717 tcg_temp_free_i32(r_const);
2718 gen_store_gpr(dc, rd, cpu_dst);
2720 break;
2721 case 0x5: /* V9 rdpc */
2723 TCGv t = gen_dest_gpr(dc, rd);
2724 if (unlikely(AM_CHECK(dc))) {
2725 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2726 } else {
2727 tcg_gen_movi_tl(t, dc->pc);
2729 gen_store_gpr(dc, rd, t);
2731 break;
2732 case 0x6: /* V9 rdfprs */
2733 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2734 gen_store_gpr(dc, rd, cpu_dst);
2735 break;
2736 case 0xf: /* V9 membar */
2737 break; /* no effect */
2738 case 0x13: /* Graphics Status */
2739 if (gen_trap_ifnofpu(dc)) {
2740 goto jmp_insn;
2742 gen_store_gpr(dc, rd, cpu_gsr);
2743 break;
2744 case 0x16: /* Softint */
2745 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2746 gen_store_gpr(dc, rd, cpu_dst);
2747 break;
2748 case 0x17: /* Tick compare */
2749 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2750 break;
2751 case 0x18: /* System tick */
2753 TCGv_ptr r_tickptr;
2754 TCGv_i32 r_const;
2756 r_tickptr = tcg_temp_new_ptr();
2757 r_const = tcg_const_i32(dc->mem_idx);
2758 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2759 offsetof(CPUSPARCState, stick));
2760 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2761 r_const);
2762 tcg_temp_free_ptr(r_tickptr);
2763 tcg_temp_free_i32(r_const);
2764 gen_store_gpr(dc, rd, cpu_dst);
2766 break;
2767 case 0x19: /* System tick compare */
2768 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2769 break;
2770 case 0x10: /* Performance Control */
2771 case 0x11: /* Performance Instrumentation Counter */
2772 case 0x12: /* Dispatch Control */
2773 case 0x14: /* Softint set, WO */
2774 case 0x15: /* Softint clear, WO */
2775 #endif
2776 default:
2777 goto illegal_insn;
2779 #if !defined(CONFIG_USER_ONLY)
2780 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2781 #ifndef TARGET_SPARC64
2782 if (!supervisor(dc)) {
2783 goto priv_insn;
2785 update_psr(dc);
2786 gen_helper_rdpsr(cpu_dst, cpu_env);
2787 #else
2788 CHECK_IU_FEATURE(dc, HYPV);
2789 if (!hypervisor(dc))
2790 goto priv_insn;
2791 rs1 = GET_FIELD(insn, 13, 17);
2792 switch (rs1) {
2793 case 0: // hpstate
2794 // gen_op_rdhpstate();
2795 break;
2796 case 1: // htstate
2797 // gen_op_rdhtstate();
2798 break;
2799 case 3: // hintp
2800 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2801 break;
2802 case 5: // htba
2803 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2804 break;
2805 case 6: // hver
2806 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2807 break;
2808 case 31: // hstick_cmpr
2809 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2810 break;
2811 default:
2812 goto illegal_insn;
2814 #endif
2815 gen_store_gpr(dc, rd, cpu_dst);
2816 break;
2817 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2818 if (!supervisor(dc)) {
2819 goto priv_insn;
2821 cpu_tmp0 = get_temp_tl(dc);
2822 #ifdef TARGET_SPARC64
2823 rs1 = GET_FIELD(insn, 13, 17);
2824 switch (rs1) {
2825 case 0: // tpc
2827 TCGv_ptr r_tsptr;
2829 r_tsptr = tcg_temp_new_ptr();
2830 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2831 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2832 offsetof(trap_state, tpc));
2833 tcg_temp_free_ptr(r_tsptr);
2835 break;
2836 case 1: // tnpc
2838 TCGv_ptr r_tsptr;
2840 r_tsptr = tcg_temp_new_ptr();
2841 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2842 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2843 offsetof(trap_state, tnpc));
2844 tcg_temp_free_ptr(r_tsptr);
2846 break;
2847 case 2: // tstate
2849 TCGv_ptr r_tsptr;
2851 r_tsptr = tcg_temp_new_ptr();
2852 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2853 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2854 offsetof(trap_state, tstate));
2855 tcg_temp_free_ptr(r_tsptr);
2857 break;
2858 case 3: // tt
2860 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2862 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2863 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2864 offsetof(trap_state, tt));
2865 tcg_temp_free_ptr(r_tsptr);
2867 break;
2868 case 4: // tick
2870 TCGv_ptr r_tickptr;
2871 TCGv_i32 r_const;
2873 r_tickptr = tcg_temp_new_ptr();
2874 r_const = tcg_const_i32(dc->mem_idx);
2875 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2876 offsetof(CPUSPARCState, tick));
2877 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2878 r_tickptr, r_const);
2879 tcg_temp_free_ptr(r_tickptr);
2880 tcg_temp_free_i32(r_const);
2882 break;
2883 case 5: // tba
2884 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2885 break;
2886 case 6: // pstate
2887 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2888 offsetof(CPUSPARCState, pstate));
2889 break;
2890 case 7: // tl
2891 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2892 offsetof(CPUSPARCState, tl));
2893 break;
2894 case 8: // pil
2895 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2896 offsetof(CPUSPARCState, psrpil));
2897 break;
2898 case 9: // cwp
2899 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2900 break;
2901 case 10: // cansave
2902 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2903 offsetof(CPUSPARCState, cansave));
2904 break;
2905 case 11: // canrestore
2906 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2907 offsetof(CPUSPARCState, canrestore));
2908 break;
2909 case 12: // cleanwin
2910 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2911 offsetof(CPUSPARCState, cleanwin));
2912 break;
2913 case 13: // otherwin
2914 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2915 offsetof(CPUSPARCState, otherwin));
2916 break;
2917 case 14: // wstate
2918 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2919 offsetof(CPUSPARCState, wstate));
2920 break;
2921 case 16: // UA2005 gl
2922 CHECK_IU_FEATURE(dc, GL);
2923 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2924 offsetof(CPUSPARCState, gl));
2925 break;
2926 case 26: // UA2005 strand status
2927 CHECK_IU_FEATURE(dc, HYPV);
2928 if (!hypervisor(dc))
2929 goto priv_insn;
2930 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2931 break;
2932 case 31: // ver
2933 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2934 break;
2935 case 15: // fq
2936 default:
2937 goto illegal_insn;
2939 #else
2940 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2941 #endif
2942 gen_store_gpr(dc, rd, cpu_tmp0);
2943 break;
2944 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2945 #ifdef TARGET_SPARC64
2946 save_state(dc);
2947 gen_helper_flushw(cpu_env);
2948 #else
2949 if (!supervisor(dc))
2950 goto priv_insn;
2951 gen_store_gpr(dc, rd, cpu_tbr);
2952 #endif
2953 break;
2954 #endif
2955 } else if (xop == 0x34) { /* FPU Operations */
2956 if (gen_trap_ifnofpu(dc)) {
2957 goto jmp_insn;
2959 gen_op_clear_ieee_excp_and_FTT();
2960 rs1 = GET_FIELD(insn, 13, 17);
2961 rs2 = GET_FIELD(insn, 27, 31);
2962 xop = GET_FIELD(insn, 18, 26);
2963 save_state(dc);
2964 switch (xop) {
2965 case 0x1: /* fmovs */
2966 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2967 gen_store_fpr_F(dc, rd, cpu_src1_32);
2968 break;
2969 case 0x5: /* fnegs */
2970 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2971 break;
2972 case 0x9: /* fabss */
2973 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2974 break;
2975 case 0x29: /* fsqrts */
2976 CHECK_FPU_FEATURE(dc, FSQRT);
2977 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2978 break;
2979 case 0x2a: /* fsqrtd */
2980 CHECK_FPU_FEATURE(dc, FSQRT);
2981 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2982 break;
2983 case 0x2b: /* fsqrtq */
2984 CHECK_FPU_FEATURE(dc, FLOAT128);
2985 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2986 break;
2987 case 0x41: /* fadds */
2988 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2989 break;
2990 case 0x42: /* faddd */
2991 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2992 break;
2993 case 0x43: /* faddq */
2994 CHECK_FPU_FEATURE(dc, FLOAT128);
2995 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2996 break;
2997 case 0x45: /* fsubs */
2998 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2999 break;
3000 case 0x46: /* fsubd */
3001 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3002 break;
3003 case 0x47: /* fsubq */
3004 CHECK_FPU_FEATURE(dc, FLOAT128);
3005 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3006 break;
3007 case 0x49: /* fmuls */
3008 CHECK_FPU_FEATURE(dc, FMUL);
3009 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3010 break;
3011 case 0x4a: /* fmuld */
3012 CHECK_FPU_FEATURE(dc, FMUL);
3013 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3014 break;
3015 case 0x4b: /* fmulq */
3016 CHECK_FPU_FEATURE(dc, FLOAT128);
3017 CHECK_FPU_FEATURE(dc, FMUL);
3018 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3019 break;
3020 case 0x4d: /* fdivs */
3021 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3022 break;
3023 case 0x4e: /* fdivd */
3024 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3025 break;
3026 case 0x4f: /* fdivq */
3027 CHECK_FPU_FEATURE(dc, FLOAT128);
3028 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3029 break;
3030 case 0x69: /* fsmuld */
3031 CHECK_FPU_FEATURE(dc, FSMULD);
3032 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3033 break;
3034 case 0x6e: /* fdmulq */
3035 CHECK_FPU_FEATURE(dc, FLOAT128);
3036 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3037 break;
3038 case 0xc4: /* fitos */
3039 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3040 break;
3041 case 0xc6: /* fdtos */
3042 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3043 break;
3044 case 0xc7: /* fqtos */
3045 CHECK_FPU_FEATURE(dc, FLOAT128);
3046 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3047 break;
3048 case 0xc8: /* fitod */
3049 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3050 break;
3051 case 0xc9: /* fstod */
3052 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3053 break;
3054 case 0xcb: /* fqtod */
3055 CHECK_FPU_FEATURE(dc, FLOAT128);
3056 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3057 break;
3058 case 0xcc: /* fitoq */
3059 CHECK_FPU_FEATURE(dc, FLOAT128);
3060 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3061 break;
3062 case 0xcd: /* fstoq */
3063 CHECK_FPU_FEATURE(dc, FLOAT128);
3064 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3065 break;
3066 case 0xce: /* fdtoq */
3067 CHECK_FPU_FEATURE(dc, FLOAT128);
3068 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3069 break;
3070 case 0xd1: /* fstoi */
3071 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3072 break;
3073 case 0xd2: /* fdtoi */
3074 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3075 break;
3076 case 0xd3: /* fqtoi */
3077 CHECK_FPU_FEATURE(dc, FLOAT128);
3078 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3079 break;
3080 #ifdef TARGET_SPARC64
3081 case 0x2: /* V9 fmovd */
3082 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3083 gen_store_fpr_D(dc, rd, cpu_src1_64);
3084 break;
3085 case 0x3: /* V9 fmovq */
3086 CHECK_FPU_FEATURE(dc, FLOAT128);
3087 gen_move_Q(rd, rs2);
3088 break;
3089 case 0x6: /* V9 fnegd */
3090 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3091 break;
3092 case 0x7: /* V9 fnegq */
3093 CHECK_FPU_FEATURE(dc, FLOAT128);
3094 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3095 break;
3096 case 0xa: /* V9 fabsd */
3097 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3098 break;
3099 case 0xb: /* V9 fabsq */
3100 CHECK_FPU_FEATURE(dc, FLOAT128);
3101 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3102 break;
3103 case 0x81: /* V9 fstox */
3104 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3105 break;
3106 case 0x82: /* V9 fdtox */
3107 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3108 break;
3109 case 0x83: /* V9 fqtox */
3110 CHECK_FPU_FEATURE(dc, FLOAT128);
3111 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3112 break;
3113 case 0x84: /* V9 fxtos */
3114 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3115 break;
3116 case 0x88: /* V9 fxtod */
3117 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3118 break;
3119 case 0x8c: /* V9 fxtoq */
3120 CHECK_FPU_FEATURE(dc, FLOAT128);
3121 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3122 break;
3123 #endif
3124 default:
3125 goto illegal_insn;
3127 } else if (xop == 0x35) { /* FPU Operations */
3128 #ifdef TARGET_SPARC64
3129 int cond;
3130 #endif
3131 if (gen_trap_ifnofpu(dc)) {
3132 goto jmp_insn;
3134 gen_op_clear_ieee_excp_and_FTT();
3135 rs1 = GET_FIELD(insn, 13, 17);
3136 rs2 = GET_FIELD(insn, 27, 31);
3137 xop = GET_FIELD(insn, 18, 26);
3138 save_state(dc);
3140 #ifdef TARGET_SPARC64
3141 #define FMOVR(sz) \
3142 do { \
3143 DisasCompare cmp; \
3144 cond = GET_FIELD_SP(insn, 10, 12); \
3145 cpu_src1 = get_src1(dc, insn); \
3146 gen_compare_reg(&cmp, cond, cpu_src1); \
3147 gen_fmov##sz(dc, &cmp, rd, rs2); \
3148 free_compare(&cmp); \
3149 } while (0)
3151 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3152 FMOVR(s);
3153 break;
3154 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3155 FMOVR(d);
3156 break;
3157 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3158 CHECK_FPU_FEATURE(dc, FLOAT128);
3159 FMOVR(q);
3160 break;
3162 #undef FMOVR
3163 #endif
3164 switch (xop) {
3165 #ifdef TARGET_SPARC64
3166 #define FMOVCC(fcc, sz) \
3167 do { \
3168 DisasCompare cmp; \
3169 cond = GET_FIELD_SP(insn, 14, 17); \
3170 gen_fcompare(&cmp, fcc, cond); \
3171 gen_fmov##sz(dc, &cmp, rd, rs2); \
3172 free_compare(&cmp); \
3173 } while (0)
3175 case 0x001: /* V9 fmovscc %fcc0 */
3176 FMOVCC(0, s);
3177 break;
3178 case 0x002: /* V9 fmovdcc %fcc0 */
3179 FMOVCC(0, d);
3180 break;
3181 case 0x003: /* V9 fmovqcc %fcc0 */
3182 CHECK_FPU_FEATURE(dc, FLOAT128);
3183 FMOVCC(0, q);
3184 break;
3185 case 0x041: /* V9 fmovscc %fcc1 */
3186 FMOVCC(1, s);
3187 break;
3188 case 0x042: /* V9 fmovdcc %fcc1 */
3189 FMOVCC(1, d);
3190 break;
3191 case 0x043: /* V9 fmovqcc %fcc1 */
3192 CHECK_FPU_FEATURE(dc, FLOAT128);
3193 FMOVCC(1, q);
3194 break;
3195 case 0x081: /* V9 fmovscc %fcc2 */
3196 FMOVCC(2, s);
3197 break;
3198 case 0x082: /* V9 fmovdcc %fcc2 */
3199 FMOVCC(2, d);
3200 break;
3201 case 0x083: /* V9 fmovqcc %fcc2 */
3202 CHECK_FPU_FEATURE(dc, FLOAT128);
3203 FMOVCC(2, q);
3204 break;
3205 case 0x0c1: /* V9 fmovscc %fcc3 */
3206 FMOVCC(3, s);
3207 break;
3208 case 0x0c2: /* V9 fmovdcc %fcc3 */
3209 FMOVCC(3, d);
3210 break;
3211 case 0x0c3: /* V9 fmovqcc %fcc3 */
3212 CHECK_FPU_FEATURE(dc, FLOAT128);
3213 FMOVCC(3, q);
3214 break;
3215 #undef FMOVCC
3216 #define FMOVCC(xcc, sz) \
3217 do { \
3218 DisasCompare cmp; \
3219 cond = GET_FIELD_SP(insn, 14, 17); \
3220 gen_compare(&cmp, xcc, cond, dc); \
3221 gen_fmov##sz(dc, &cmp, rd, rs2); \
3222 free_compare(&cmp); \
3223 } while (0)
3225 case 0x101: /* V9 fmovscc %icc */
3226 FMOVCC(0, s);
3227 break;
3228 case 0x102: /* V9 fmovdcc %icc */
3229 FMOVCC(0, d);
3230 break;
3231 case 0x103: /* V9 fmovqcc %icc */
3232 CHECK_FPU_FEATURE(dc, FLOAT128);
3233 FMOVCC(0, q);
3234 break;
3235 case 0x181: /* V9 fmovscc %xcc */
3236 FMOVCC(1, s);
3237 break;
3238 case 0x182: /* V9 fmovdcc %xcc */
3239 FMOVCC(1, d);
3240 break;
3241 case 0x183: /* V9 fmovqcc %xcc */
3242 CHECK_FPU_FEATURE(dc, FLOAT128);
3243 FMOVCC(1, q);
3244 break;
3245 #undef FMOVCC
3246 #endif
3247 case 0x51: /* fcmps, V9 %fcc */
3248 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3249 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3250 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3251 break;
3252 case 0x52: /* fcmpd, V9 %fcc */
3253 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3254 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3255 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3256 break;
3257 case 0x53: /* fcmpq, V9 %fcc */
3258 CHECK_FPU_FEATURE(dc, FLOAT128);
3259 gen_op_load_fpr_QT0(QFPREG(rs1));
3260 gen_op_load_fpr_QT1(QFPREG(rs2));
3261 gen_op_fcmpq(rd & 3);
3262 break;
3263 case 0x55: /* fcmpes, V9 %fcc */
3264 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3265 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3266 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3267 break;
3268 case 0x56: /* fcmped, V9 %fcc */
3269 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3270 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3271 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3272 break;
3273 case 0x57: /* fcmpeq, V9 %fcc */
3274 CHECK_FPU_FEATURE(dc, FLOAT128);
3275 gen_op_load_fpr_QT0(QFPREG(rs1));
3276 gen_op_load_fpr_QT1(QFPREG(rs2));
3277 gen_op_fcmpeq(rd & 3);
3278 break;
3279 default:
3280 goto illegal_insn;
3282 } else if (xop == 0x2) {
3283 TCGv dst = gen_dest_gpr(dc, rd);
3284 rs1 = GET_FIELD(insn, 13, 17);
3285 if (rs1 == 0) {
3286 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3287 if (IS_IMM) { /* immediate */
3288 simm = GET_FIELDs(insn, 19, 31);
3289 tcg_gen_movi_tl(dst, simm);
3290 gen_store_gpr(dc, rd, dst);
3291 } else { /* register */
3292 rs2 = GET_FIELD(insn, 27, 31);
3293 if (rs2 == 0) {
3294 tcg_gen_movi_tl(dst, 0);
3295 gen_store_gpr(dc, rd, dst);
3296 } else {
3297 cpu_src2 = gen_load_gpr(dc, rs2);
3298 gen_store_gpr(dc, rd, cpu_src2);
3301 } else {
3302 cpu_src1 = get_src1(dc, insn);
3303 if (IS_IMM) { /* immediate */
3304 simm = GET_FIELDs(insn, 19, 31);
3305 tcg_gen_ori_tl(dst, cpu_src1, simm);
3306 gen_store_gpr(dc, rd, dst);
3307 } else { /* register */
3308 rs2 = GET_FIELD(insn, 27, 31);
3309 if (rs2 == 0) {
3310 /* mov shortcut: or x, %g0, y -> mov x, y */
3311 gen_store_gpr(dc, rd, cpu_src1);
3312 } else {
3313 cpu_src2 = gen_load_gpr(dc, rs2);
3314 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3315 gen_store_gpr(dc, rd, dst);
3319 #ifdef TARGET_SPARC64
3320 } else if (xop == 0x25) { /* sll, V9 sllx */
3321 cpu_src1 = get_src1(dc, insn);
3322 if (IS_IMM) { /* immediate */
3323 simm = GET_FIELDs(insn, 20, 31);
3324 if (insn & (1 << 12)) {
3325 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3326 } else {
3327 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3329 } else { /* register */
3330 rs2 = GET_FIELD(insn, 27, 31);
3331 cpu_src2 = gen_load_gpr(dc, rs2);
3332 cpu_tmp0 = get_temp_tl(dc);
3333 if (insn & (1 << 12)) {
3334 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3335 } else {
3336 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3338 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3340 gen_store_gpr(dc, rd, cpu_dst);
3341 } else if (xop == 0x26) { /* srl, V9 srlx */
3342 cpu_src1 = get_src1(dc, insn);
3343 if (IS_IMM) { /* immediate */
3344 simm = GET_FIELDs(insn, 20, 31);
3345 if (insn & (1 << 12)) {
3346 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3347 } else {
3348 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3349 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3351 } else { /* register */
3352 rs2 = GET_FIELD(insn, 27, 31);
3353 cpu_src2 = gen_load_gpr(dc, rs2);
3354 cpu_tmp0 = get_temp_tl(dc);
3355 if (insn & (1 << 12)) {
3356 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3357 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3358 } else {
3359 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3360 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3361 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3364 gen_store_gpr(dc, rd, cpu_dst);
3365 } else if (xop == 0x27) { /* sra, V9 srax */
3366 cpu_src1 = get_src1(dc, insn);
3367 if (IS_IMM) { /* immediate */
3368 simm = GET_FIELDs(insn, 20, 31);
3369 if (insn & (1 << 12)) {
3370 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3371 } else {
3372 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3373 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3375 } else { /* register */
3376 rs2 = GET_FIELD(insn, 27, 31);
3377 cpu_src2 = gen_load_gpr(dc, rs2);
3378 cpu_tmp0 = get_temp_tl(dc);
3379 if (insn & (1 << 12)) {
3380 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3381 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3382 } else {
3383 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3384 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3385 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3388 gen_store_gpr(dc, rd, cpu_dst);
3389 #endif
3390 } else if (xop < 0x36) {
3391 if (xop < 0x20) {
3392 cpu_src1 = get_src1(dc, insn);
3393 cpu_src2 = get_src2(dc, insn);
3394 switch (xop & ~0x10) {
3395 case 0x0: /* add */
3396 if (xop & 0x10) {
3397 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3398 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3399 dc->cc_op = CC_OP_ADD;
3400 } else {
3401 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3403 break;
3404 case 0x1: /* and */
3405 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3406 if (xop & 0x10) {
3407 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3408 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3409 dc->cc_op = CC_OP_LOGIC;
3411 break;
3412 case 0x2: /* or */
3413 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3414 if (xop & 0x10) {
3415 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3416 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3417 dc->cc_op = CC_OP_LOGIC;
3419 break;
3420 case 0x3: /* xor */
3421 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3422 if (xop & 0x10) {
3423 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3424 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3425 dc->cc_op = CC_OP_LOGIC;
3427 break;
3428 case 0x4: /* sub */
3429 if (xop & 0x10) {
3430 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3431 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3432 dc->cc_op = CC_OP_SUB;
3433 } else {
3434 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3436 break;
3437 case 0x5: /* andn */
3438 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3439 if (xop & 0x10) {
3440 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3441 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3442 dc->cc_op = CC_OP_LOGIC;
3444 break;
3445 case 0x6: /* orn */
3446 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3447 if (xop & 0x10) {
3448 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3449 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3450 dc->cc_op = CC_OP_LOGIC;
3452 break;
3453 case 0x7: /* xorn */
3454 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3455 if (xop & 0x10) {
3456 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3457 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3458 dc->cc_op = CC_OP_LOGIC;
3460 break;
3461 case 0x8: /* addx, V9 addc */
3462 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3463 (xop & 0x10));
3464 break;
3465 #ifdef TARGET_SPARC64
3466 case 0x9: /* V9 mulx */
3467 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3468 break;
3469 #endif
3470 case 0xa: /* umul */
3471 CHECK_IU_FEATURE(dc, MUL);
3472 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3473 if (xop & 0x10) {
3474 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3475 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3476 dc->cc_op = CC_OP_LOGIC;
3478 break;
3479 case 0xb: /* smul */
3480 CHECK_IU_FEATURE(dc, MUL);
3481 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3482 if (xop & 0x10) {
3483 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3484 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3485 dc->cc_op = CC_OP_LOGIC;
3487 break;
3488 case 0xc: /* subx, V9 subc */
3489 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3490 (xop & 0x10));
3491 break;
3492 #ifdef TARGET_SPARC64
3493 case 0xd: /* V9 udivx */
3494 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3495 break;
3496 #endif
3497 case 0xe: /* udiv */
3498 CHECK_IU_FEATURE(dc, DIV);
3499 if (xop & 0x10) {
3500 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3501 cpu_src2);
3502 dc->cc_op = CC_OP_DIV;
3503 } else {
3504 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3505 cpu_src2);
3507 break;
3508 case 0xf: /* sdiv */
3509 CHECK_IU_FEATURE(dc, DIV);
3510 if (xop & 0x10) {
3511 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3512 cpu_src2);
3513 dc->cc_op = CC_OP_DIV;
3514 } else {
3515 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3516 cpu_src2);
3518 break;
3519 default:
3520 goto illegal_insn;
3522 gen_store_gpr(dc, rd, cpu_dst);
3523 } else {
3524 cpu_src1 = get_src1(dc, insn);
3525 cpu_src2 = get_src2(dc, insn);
3526 switch (xop) {
3527 case 0x20: /* taddcc */
3528 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3529 gen_store_gpr(dc, rd, cpu_dst);
3530 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3531 dc->cc_op = CC_OP_TADD;
3532 break;
3533 case 0x21: /* tsubcc */
3534 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3535 gen_store_gpr(dc, rd, cpu_dst);
3536 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3537 dc->cc_op = CC_OP_TSUB;
3538 break;
3539 case 0x22: /* taddcctv */
3540 gen_helper_taddcctv(cpu_dst, cpu_env,
3541 cpu_src1, cpu_src2);
3542 gen_store_gpr(dc, rd, cpu_dst);
3543 dc->cc_op = CC_OP_TADDTV;
3544 break;
3545 case 0x23: /* tsubcctv */
3546 gen_helper_tsubcctv(cpu_dst, cpu_env,
3547 cpu_src1, cpu_src2);
3548 gen_store_gpr(dc, rd, cpu_dst);
3549 dc->cc_op = CC_OP_TSUBTV;
3550 break;
3551 case 0x24: /* mulscc */
3552 update_psr(dc);
3553 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3554 gen_store_gpr(dc, rd, cpu_dst);
3555 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3556 dc->cc_op = CC_OP_ADD;
3557 break;
3558 #ifndef TARGET_SPARC64
3559 case 0x25: /* sll */
3560 if (IS_IMM) { /* immediate */
3561 simm = GET_FIELDs(insn, 20, 31);
3562 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3563 } else { /* register */
3564 cpu_tmp0 = get_temp_tl(dc);
3565 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3566 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3568 gen_store_gpr(dc, rd, cpu_dst);
3569 break;
3570 case 0x26: /* srl */
3571 if (IS_IMM) { /* immediate */
3572 simm = GET_FIELDs(insn, 20, 31);
3573 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3574 } else { /* register */
3575 cpu_tmp0 = get_temp_tl(dc);
3576 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3577 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3579 gen_store_gpr(dc, rd, cpu_dst);
3580 break;
3581 case 0x27: /* sra */
3582 if (IS_IMM) { /* immediate */
3583 simm = GET_FIELDs(insn, 20, 31);
3584 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3585 } else { /* register */
3586 cpu_tmp0 = get_temp_tl(dc);
3587 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3588 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3590 gen_store_gpr(dc, rd, cpu_dst);
3591 break;
3592 #endif
3593 case 0x30:
3595 cpu_tmp0 = get_temp_tl(dc);
3596 switch(rd) {
3597 case 0: /* wry */
3598 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3599 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3600 break;
3601 #ifndef TARGET_SPARC64
3602 case 0x01 ... 0x0f: /* undefined in the
3603 SPARCv8 manual, nop
3604 on the microSPARC
3605 II */
3606 case 0x10 ... 0x1f: /* implementation-dependent
3607 in the SPARCv8
3608 manual, nop on the
3609 microSPARC II */
3610 if ((rd == 0x13) && (dc->def->features &
3611 CPU_FEATURE_POWERDOWN)) {
3612 /* LEON3 power-down */
3613 save_state(dc);
3614 gen_helper_power_down(cpu_env);
3616 break;
3617 #else
3618 case 0x2: /* V9 wrccr */
3619 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3620 gen_helper_wrccr(cpu_env, cpu_tmp0);
3621 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3622 dc->cc_op = CC_OP_FLAGS;
3623 break;
3624 case 0x3: /* V9 wrasi */
3625 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3626 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3627 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3628 break;
3629 case 0x6: /* V9 wrfprs */
3630 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3631 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3632 save_state(dc);
3633 gen_op_next_insn();
3634 tcg_gen_exit_tb(0);
3635 dc->is_br = 1;
3636 break;
3637 case 0xf: /* V9 sir, nop if user */
3638 #if !defined(CONFIG_USER_ONLY)
3639 if (supervisor(dc)) {
3640 ; // XXX
3642 #endif
3643 break;
3644 case 0x13: /* Graphics Status */
3645 if (gen_trap_ifnofpu(dc)) {
3646 goto jmp_insn;
3648 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3649 break;
3650 case 0x14: /* Softint set */
3651 if (!supervisor(dc))
3652 goto illegal_insn;
3653 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3654 gen_helper_set_softint(cpu_env, cpu_tmp0);
3655 break;
3656 case 0x15: /* Softint clear */
3657 if (!supervisor(dc))
3658 goto illegal_insn;
3659 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3660 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3661 break;
3662 case 0x16: /* Softint write */
3663 if (!supervisor(dc))
3664 goto illegal_insn;
3665 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3666 gen_helper_write_softint(cpu_env, cpu_tmp0);
3667 break;
3668 case 0x17: /* Tick compare */
3669 #if !defined(CONFIG_USER_ONLY)
3670 if (!supervisor(dc))
3671 goto illegal_insn;
3672 #endif
3674 TCGv_ptr r_tickptr;
3676 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3677 cpu_src2);
3678 r_tickptr = tcg_temp_new_ptr();
3679 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3680 offsetof(CPUSPARCState, tick));
3681 gen_helper_tick_set_limit(r_tickptr,
3682 cpu_tick_cmpr);
3683 tcg_temp_free_ptr(r_tickptr);
3685 break;
3686 case 0x18: /* System tick */
3687 #if !defined(CONFIG_USER_ONLY)
3688 if (!supervisor(dc))
3689 goto illegal_insn;
3690 #endif
3692 TCGv_ptr r_tickptr;
3694 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3695 cpu_src2);
3696 r_tickptr = tcg_temp_new_ptr();
3697 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3698 offsetof(CPUSPARCState, stick));
3699 gen_helper_tick_set_count(r_tickptr,
3700 cpu_tmp0);
3701 tcg_temp_free_ptr(r_tickptr);
3703 break;
3704 case 0x19: /* System tick compare */
3705 #if !defined(CONFIG_USER_ONLY)
3706 if (!supervisor(dc))
3707 goto illegal_insn;
3708 #endif
3710 TCGv_ptr r_tickptr;
3712 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3713 cpu_src2);
3714 r_tickptr = tcg_temp_new_ptr();
3715 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3716 offsetof(CPUSPARCState, stick));
3717 gen_helper_tick_set_limit(r_tickptr,
3718 cpu_stick_cmpr);
3719 tcg_temp_free_ptr(r_tickptr);
3721 break;
3723 case 0x10: /* Performance Control */
3724 case 0x11: /* Performance Instrumentation
3725 Counter */
3726 case 0x12: /* Dispatch Control */
3727 #endif
3728 default:
3729 goto illegal_insn;
3732 break;
3733 #if !defined(CONFIG_USER_ONLY)
3734 case 0x31: /* wrpsr, V9 saved, restored */
3736 if (!supervisor(dc))
3737 goto priv_insn;
3738 #ifdef TARGET_SPARC64
3739 switch (rd) {
3740 case 0:
3741 gen_helper_saved(cpu_env);
3742 break;
3743 case 1:
3744 gen_helper_restored(cpu_env);
3745 break;
3746 case 2: /* UA2005 allclean */
3747 case 3: /* UA2005 otherw */
3748 case 4: /* UA2005 normalw */
3749 case 5: /* UA2005 invalw */
3750 // XXX
3751 default:
3752 goto illegal_insn;
3754 #else
3755 cpu_tmp0 = get_temp_tl(dc);
3756 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3757 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3758 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3759 dc->cc_op = CC_OP_FLAGS;
3760 save_state(dc);
3761 gen_op_next_insn();
3762 tcg_gen_exit_tb(0);
3763 dc->is_br = 1;
3764 #endif
3766 break;
3767 case 0x32: /* wrwim, V9 wrpr */
3769 if (!supervisor(dc))
3770 goto priv_insn;
3771 cpu_tmp0 = get_temp_tl(dc);
3772 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3773 #ifdef TARGET_SPARC64
3774 switch (rd) {
3775 case 0: // tpc
3777 TCGv_ptr r_tsptr;
3779 r_tsptr = tcg_temp_new_ptr();
3780 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3781 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3782 offsetof(trap_state, tpc));
3783 tcg_temp_free_ptr(r_tsptr);
3785 break;
3786 case 1: // tnpc
3788 TCGv_ptr r_tsptr;
3790 r_tsptr = tcg_temp_new_ptr();
3791 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3792 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3793 offsetof(trap_state, tnpc));
3794 tcg_temp_free_ptr(r_tsptr);
3796 break;
3797 case 2: // tstate
3799 TCGv_ptr r_tsptr;
3801 r_tsptr = tcg_temp_new_ptr();
3802 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3803 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3804 offsetof(trap_state,
3805 tstate));
3806 tcg_temp_free_ptr(r_tsptr);
3808 break;
3809 case 3: // tt
3811 TCGv_ptr r_tsptr;
3813 r_tsptr = tcg_temp_new_ptr();
3814 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3815 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3816 offsetof(trap_state, tt));
3817 tcg_temp_free_ptr(r_tsptr);
3819 break;
3820 case 4: // tick
3822 TCGv_ptr r_tickptr;
3824 r_tickptr = tcg_temp_new_ptr();
3825 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3826 offsetof(CPUSPARCState, tick));
3827 gen_helper_tick_set_count(r_tickptr,
3828 cpu_tmp0);
3829 tcg_temp_free_ptr(r_tickptr);
3831 break;
3832 case 5: // tba
3833 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3834 break;
3835 case 6: // pstate
3836 save_state(dc);
3837 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3838 dc->npc = DYNAMIC_PC;
3839 break;
3840 case 7: // tl
3841 save_state(dc);
3842 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3843 offsetof(CPUSPARCState, tl));
3844 dc->npc = DYNAMIC_PC;
3845 break;
3846 case 8: // pil
3847 gen_helper_wrpil(cpu_env, cpu_tmp0);
3848 break;
3849 case 9: // cwp
3850 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3851 break;
3852 case 10: // cansave
3853 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3854 offsetof(CPUSPARCState,
3855 cansave));
3856 break;
3857 case 11: // canrestore
3858 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3859 offsetof(CPUSPARCState,
3860 canrestore));
3861 break;
3862 case 12: // cleanwin
3863 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3864 offsetof(CPUSPARCState,
3865 cleanwin));
3866 break;
3867 case 13: // otherwin
3868 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3869 offsetof(CPUSPARCState,
3870 otherwin));
3871 break;
3872 case 14: // wstate
3873 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3874 offsetof(CPUSPARCState,
3875 wstate));
3876 break;
3877 case 16: // UA2005 gl
3878 CHECK_IU_FEATURE(dc, GL);
3879 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3880 offsetof(CPUSPARCState, gl));
3881 break;
3882 case 26: // UA2005 strand status
3883 CHECK_IU_FEATURE(dc, HYPV);
3884 if (!hypervisor(dc))
3885 goto priv_insn;
3886 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3887 break;
3888 default:
3889 goto illegal_insn;
3891 #else
3892 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3893 if (dc->def->nwindows != 32) {
3894 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3895 (1 << dc->def->nwindows) - 1);
3897 #endif
3899 break;
3900 case 0x33: /* wrtbr, UA2005 wrhpr */
3902 #ifndef TARGET_SPARC64
3903 if (!supervisor(dc))
3904 goto priv_insn;
3905 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3906 #else
3907 CHECK_IU_FEATURE(dc, HYPV);
3908 if (!hypervisor(dc))
3909 goto priv_insn;
3910 cpu_tmp0 = get_temp_tl(dc);
3911 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3912 switch (rd) {
3913 case 0: // hpstate
3914 // XXX gen_op_wrhpstate();
3915 save_state(dc);
3916 gen_op_next_insn();
3917 tcg_gen_exit_tb(0);
3918 dc->is_br = 1;
3919 break;
3920 case 1: // htstate
3921 // XXX gen_op_wrhtstate();
3922 break;
3923 case 3: // hintp
3924 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3925 break;
3926 case 5: // htba
3927 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3928 break;
3929 case 31: // hstick_cmpr
3931 TCGv_ptr r_tickptr;
3933 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3934 r_tickptr = tcg_temp_new_ptr();
3935 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3936 offsetof(CPUSPARCState, hstick));
3937 gen_helper_tick_set_limit(r_tickptr,
3938 cpu_hstick_cmpr);
3939 tcg_temp_free_ptr(r_tickptr);
3941 break;
3942 case 6: // hver readonly
3943 default:
3944 goto illegal_insn;
3946 #endif
3948 break;
3949 #endif
3950 #ifdef TARGET_SPARC64
3951 case 0x2c: /* V9 movcc */
3953 int cc = GET_FIELD_SP(insn, 11, 12);
3954 int cond = GET_FIELD_SP(insn, 14, 17);
3955 DisasCompare cmp;
3956 TCGv dst;
3958 if (insn & (1 << 18)) {
3959 if (cc == 0) {
3960 gen_compare(&cmp, 0, cond, dc);
3961 } else if (cc == 2) {
3962 gen_compare(&cmp, 1, cond, dc);
3963 } else {
3964 goto illegal_insn;
3966 } else {
3967 gen_fcompare(&cmp, cc, cond);
3970 /* The get_src2 above loaded the normal 13-bit
3971 immediate field, not the 11-bit field we have
3972 in movcc. But it did handle the reg case. */
3973 if (IS_IMM) {
3974 simm = GET_FIELD_SPs(insn, 0, 10);
3975 tcg_gen_movi_tl(cpu_src2, simm);
3978 dst = gen_load_gpr(dc, rd);
3979 tcg_gen_movcond_tl(cmp.cond, dst,
3980 cmp.c1, cmp.c2,
3981 cpu_src2, dst);
3982 free_compare(&cmp);
3983 gen_store_gpr(dc, rd, dst);
3984 break;
3986 case 0x2d: /* V9 sdivx */
3987 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3988 gen_store_gpr(dc, rd, cpu_dst);
3989 break;
3990 case 0x2e: /* V9 popc */
3991 gen_helper_popc(cpu_dst, cpu_src2);
3992 gen_store_gpr(dc, rd, cpu_dst);
3993 break;
3994 case 0x2f: /* V9 movr */
3996 int cond = GET_FIELD_SP(insn, 10, 12);
3997 DisasCompare cmp;
3998 TCGv dst;
4000 gen_compare_reg(&cmp, cond, cpu_src1);
4002 /* The get_src2 above loaded the normal 13-bit
4003 immediate field, not the 10-bit field we have
4004 in movr. But it did handle the reg case. */
4005 if (IS_IMM) {
4006 simm = GET_FIELD_SPs(insn, 0, 9);
4007 tcg_gen_movi_tl(cpu_src2, simm);
4010 dst = gen_load_gpr(dc, rd);
4011 tcg_gen_movcond_tl(cmp.cond, dst,
4012 cmp.c1, cmp.c2,
4013 cpu_src2, dst);
4014 free_compare(&cmp);
4015 gen_store_gpr(dc, rd, dst);
4016 break;
4018 #endif
4019 default:
4020 goto illegal_insn;
4023 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4024 #ifdef TARGET_SPARC64
4025 int opf = GET_FIELD_SP(insn, 5, 13);
4026 rs1 = GET_FIELD(insn, 13, 17);
4027 rs2 = GET_FIELD(insn, 27, 31);
4028 if (gen_trap_ifnofpu(dc)) {
4029 goto jmp_insn;
4032 switch (opf) {
4033 case 0x000: /* VIS I edge8cc */
4034 CHECK_FPU_FEATURE(dc, VIS1);
4035 cpu_src1 = gen_load_gpr(dc, rs1);
4036 cpu_src2 = gen_load_gpr(dc, rs2);
4037 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4038 gen_store_gpr(dc, rd, cpu_dst);
4039 break;
4040 case 0x001: /* VIS II edge8n */
4041 CHECK_FPU_FEATURE(dc, VIS2);
4042 cpu_src1 = gen_load_gpr(dc, rs1);
4043 cpu_src2 = gen_load_gpr(dc, rs2);
4044 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4045 gen_store_gpr(dc, rd, cpu_dst);
4046 break;
4047 case 0x002: /* VIS I edge8lcc */
4048 CHECK_FPU_FEATURE(dc, VIS1);
4049 cpu_src1 = gen_load_gpr(dc, rs1);
4050 cpu_src2 = gen_load_gpr(dc, rs2);
4051 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4052 gen_store_gpr(dc, rd, cpu_dst);
4053 break;
4054 case 0x003: /* VIS II edge8ln */
4055 CHECK_FPU_FEATURE(dc, VIS2);
4056 cpu_src1 = gen_load_gpr(dc, rs1);
4057 cpu_src2 = gen_load_gpr(dc, rs2);
4058 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4059 gen_store_gpr(dc, rd, cpu_dst);
4060 break;
4061 case 0x004: /* VIS I edge16cc */
4062 CHECK_FPU_FEATURE(dc, VIS1);
4063 cpu_src1 = gen_load_gpr(dc, rs1);
4064 cpu_src2 = gen_load_gpr(dc, rs2);
4065 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4066 gen_store_gpr(dc, rd, cpu_dst);
4067 break;
4068 case 0x005: /* VIS II edge16n */
4069 CHECK_FPU_FEATURE(dc, VIS2);
4070 cpu_src1 = gen_load_gpr(dc, rs1);
4071 cpu_src2 = gen_load_gpr(dc, rs2);
4072 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4073 gen_store_gpr(dc, rd, cpu_dst);
4074 break;
4075 case 0x006: /* VIS I edge16lcc */
4076 CHECK_FPU_FEATURE(dc, VIS1);
4077 cpu_src1 = gen_load_gpr(dc, rs1);
4078 cpu_src2 = gen_load_gpr(dc, rs2);
4079 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4080 gen_store_gpr(dc, rd, cpu_dst);
4081 break;
4082 case 0x007: /* VIS II edge16ln */
4083 CHECK_FPU_FEATURE(dc, VIS2);
4084 cpu_src1 = gen_load_gpr(dc, rs1);
4085 cpu_src2 = gen_load_gpr(dc, rs2);
4086 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4087 gen_store_gpr(dc, rd, cpu_dst);
4088 break;
4089 case 0x008: /* VIS I edge32cc */
4090 CHECK_FPU_FEATURE(dc, VIS1);
4091 cpu_src1 = gen_load_gpr(dc, rs1);
4092 cpu_src2 = gen_load_gpr(dc, rs2);
4093 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4094 gen_store_gpr(dc, rd, cpu_dst);
4095 break;
4096 case 0x009: /* VIS II edge32n */
4097 CHECK_FPU_FEATURE(dc, VIS2);
4098 cpu_src1 = gen_load_gpr(dc, rs1);
4099 cpu_src2 = gen_load_gpr(dc, rs2);
4100 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4101 gen_store_gpr(dc, rd, cpu_dst);
4102 break;
4103 case 0x00a: /* VIS I edge32lcc */
4104 CHECK_FPU_FEATURE(dc, VIS1);
4105 cpu_src1 = gen_load_gpr(dc, rs1);
4106 cpu_src2 = gen_load_gpr(dc, rs2);
4107 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4108 gen_store_gpr(dc, rd, cpu_dst);
4109 break;
4110 case 0x00b: /* VIS II edge32ln */
4111 CHECK_FPU_FEATURE(dc, VIS2);
4112 cpu_src1 = gen_load_gpr(dc, rs1);
4113 cpu_src2 = gen_load_gpr(dc, rs2);
4114 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4115 gen_store_gpr(dc, rd, cpu_dst);
4116 break;
4117 case 0x010: /* VIS I array8 */
4118 CHECK_FPU_FEATURE(dc, VIS1);
4119 cpu_src1 = gen_load_gpr(dc, rs1);
4120 cpu_src2 = gen_load_gpr(dc, rs2);
4121 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4122 gen_store_gpr(dc, rd, cpu_dst);
4123 break;
4124 case 0x012: /* VIS I array16 */
4125 CHECK_FPU_FEATURE(dc, VIS1);
4126 cpu_src1 = gen_load_gpr(dc, rs1);
4127 cpu_src2 = gen_load_gpr(dc, rs2);
4128 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4129 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4130 gen_store_gpr(dc, rd, cpu_dst);
4131 break;
4132 case 0x014: /* VIS I array32 */
4133 CHECK_FPU_FEATURE(dc, VIS1);
4134 cpu_src1 = gen_load_gpr(dc, rs1);
4135 cpu_src2 = gen_load_gpr(dc, rs2);
4136 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4137 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4138 gen_store_gpr(dc, rd, cpu_dst);
4139 break;
4140 case 0x018: /* VIS I alignaddr */
4141 CHECK_FPU_FEATURE(dc, VIS1);
4142 cpu_src1 = gen_load_gpr(dc, rs1);
4143 cpu_src2 = gen_load_gpr(dc, rs2);
4144 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4145 gen_store_gpr(dc, rd, cpu_dst);
4146 break;
4147 case 0x01a: /* VIS I alignaddrl */
4148 CHECK_FPU_FEATURE(dc, VIS1);
4149 cpu_src1 = gen_load_gpr(dc, rs1);
4150 cpu_src2 = gen_load_gpr(dc, rs2);
4151 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4152 gen_store_gpr(dc, rd, cpu_dst);
4153 break;
4154 case 0x019: /* VIS II bmask */
4155 CHECK_FPU_FEATURE(dc, VIS2);
4156 cpu_src1 = gen_load_gpr(dc, rs1);
4157 cpu_src2 = gen_load_gpr(dc, rs2);
4158 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4159 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4160 gen_store_gpr(dc, rd, cpu_dst);
4161 break;
4162 case 0x020: /* VIS I fcmple16 */
4163 CHECK_FPU_FEATURE(dc, VIS1);
4164 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4165 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4166 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4167 gen_store_gpr(dc, rd, cpu_dst);
4168 break;
4169 case 0x022: /* VIS I fcmpne16 */
4170 CHECK_FPU_FEATURE(dc, VIS1);
4171 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4172 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4173 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4174 gen_store_gpr(dc, rd, cpu_dst);
4175 break;
4176 case 0x024: /* VIS I fcmple32 */
4177 CHECK_FPU_FEATURE(dc, VIS1);
4178 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4179 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4180 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4181 gen_store_gpr(dc, rd, cpu_dst);
4182 break;
4183 case 0x026: /* VIS I fcmpne32 */
4184 CHECK_FPU_FEATURE(dc, VIS1);
4185 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4186 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4187 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4188 gen_store_gpr(dc, rd, cpu_dst);
4189 break;
4190 case 0x028: /* VIS I fcmpgt16 */
4191 CHECK_FPU_FEATURE(dc, VIS1);
4192 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4193 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4194 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4195 gen_store_gpr(dc, rd, cpu_dst);
4196 break;
4197 case 0x02a: /* VIS I fcmpeq16 */
4198 CHECK_FPU_FEATURE(dc, VIS1);
4199 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4200 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4201 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4202 gen_store_gpr(dc, rd, cpu_dst);
4203 break;
4204 case 0x02c: /* VIS I fcmpgt32 */
4205 CHECK_FPU_FEATURE(dc, VIS1);
4206 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4207 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4208 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4209 gen_store_gpr(dc, rd, cpu_dst);
4210 break;
4211 case 0x02e: /* VIS I fcmpeq32 */
4212 CHECK_FPU_FEATURE(dc, VIS1);
4213 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4214 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4215 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4216 gen_store_gpr(dc, rd, cpu_dst);
4217 break;
4218 case 0x031: /* VIS I fmul8x16 */
4219 CHECK_FPU_FEATURE(dc, VIS1);
4220 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4221 break;
4222 case 0x033: /* VIS I fmul8x16au */
4223 CHECK_FPU_FEATURE(dc, VIS1);
4224 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4225 break;
4226 case 0x035: /* VIS I fmul8x16al */
4227 CHECK_FPU_FEATURE(dc, VIS1);
4228 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4229 break;
4230 case 0x036: /* VIS I fmul8sux16 */
4231 CHECK_FPU_FEATURE(dc, VIS1);
4232 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4233 break;
4234 case 0x037: /* VIS I fmul8ulx16 */
4235 CHECK_FPU_FEATURE(dc, VIS1);
4236 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4237 break;
4238 case 0x038: /* VIS I fmuld8sux16 */
4239 CHECK_FPU_FEATURE(dc, VIS1);
4240 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4241 break;
4242 case 0x039: /* VIS I fmuld8ulx16 */
4243 CHECK_FPU_FEATURE(dc, VIS1);
4244 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4245 break;
4246 case 0x03a: /* VIS I fpack32 */
4247 CHECK_FPU_FEATURE(dc, VIS1);
4248 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4249 break;
4250 case 0x03b: /* VIS I fpack16 */
4251 CHECK_FPU_FEATURE(dc, VIS1);
4252 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4253 cpu_dst_32 = gen_dest_fpr_F(dc);
4254 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4255 gen_store_fpr_F(dc, rd, cpu_dst_32);
4256 break;
4257 case 0x03d: /* VIS I fpackfix */
4258 CHECK_FPU_FEATURE(dc, VIS1);
4259 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4260 cpu_dst_32 = gen_dest_fpr_F(dc);
4261 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4262 gen_store_fpr_F(dc, rd, cpu_dst_32);
4263 break;
4264 case 0x03e: /* VIS I pdist */
4265 CHECK_FPU_FEATURE(dc, VIS1);
4266 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4267 break;
4268 case 0x048: /* VIS I faligndata */
4269 CHECK_FPU_FEATURE(dc, VIS1);
4270 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4271 break;
4272 case 0x04b: /* VIS I fpmerge */
4273 CHECK_FPU_FEATURE(dc, VIS1);
4274 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4275 break;
4276 case 0x04c: /* VIS II bshuffle */
4277 CHECK_FPU_FEATURE(dc, VIS2);
4278 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4279 break;
4280 case 0x04d: /* VIS I fexpand */
4281 CHECK_FPU_FEATURE(dc, VIS1);
4282 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4283 break;
4284 case 0x050: /* VIS I fpadd16 */
4285 CHECK_FPU_FEATURE(dc, VIS1);
4286 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4287 break;
4288 case 0x051: /* VIS I fpadd16s */
4289 CHECK_FPU_FEATURE(dc, VIS1);
4290 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4291 break;
4292 case 0x052: /* VIS I fpadd32 */
4293 CHECK_FPU_FEATURE(dc, VIS1);
4294 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4295 break;
4296 case 0x053: /* VIS I fpadd32s */
4297 CHECK_FPU_FEATURE(dc, VIS1);
4298 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4299 break;
4300 case 0x054: /* VIS I fpsub16 */
4301 CHECK_FPU_FEATURE(dc, VIS1);
4302 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4303 break;
4304 case 0x055: /* VIS I fpsub16s */
4305 CHECK_FPU_FEATURE(dc, VIS1);
4306 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4307 break;
4308 case 0x056: /* VIS I fpsub32 */
4309 CHECK_FPU_FEATURE(dc, VIS1);
4310 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4311 break;
4312 case 0x057: /* VIS I fpsub32s */
4313 CHECK_FPU_FEATURE(dc, VIS1);
4314 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4315 break;
4316 case 0x060: /* VIS I fzero */
4317 CHECK_FPU_FEATURE(dc, VIS1);
4318 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4319 tcg_gen_movi_i64(cpu_dst_64, 0);
4320 gen_store_fpr_D(dc, rd, cpu_dst_64);
4321 break;
4322 case 0x061: /* VIS I fzeros */
4323 CHECK_FPU_FEATURE(dc, VIS1);
4324 cpu_dst_32 = gen_dest_fpr_F(dc);
4325 tcg_gen_movi_i32(cpu_dst_32, 0);
4326 gen_store_fpr_F(dc, rd, cpu_dst_32);
4327 break;
4328 case 0x062: /* VIS I fnor */
4329 CHECK_FPU_FEATURE(dc, VIS1);
4330 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4331 break;
4332 case 0x063: /* VIS I fnors */
4333 CHECK_FPU_FEATURE(dc, VIS1);
4334 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4335 break;
4336 case 0x064: /* VIS I fandnot2 */
4337 CHECK_FPU_FEATURE(dc, VIS1);
4338 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4339 break;
4340 case 0x065: /* VIS I fandnot2s */
4341 CHECK_FPU_FEATURE(dc, VIS1);
4342 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4343 break;
4344 case 0x066: /* VIS I fnot2 */
4345 CHECK_FPU_FEATURE(dc, VIS1);
4346 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4347 break;
4348 case 0x067: /* VIS I fnot2s */
4349 CHECK_FPU_FEATURE(dc, VIS1);
4350 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4351 break;
4352 case 0x068: /* VIS I fandnot1 */
4353 CHECK_FPU_FEATURE(dc, VIS1);
4354 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4355 break;
4356 case 0x069: /* VIS I fandnot1s */
4357 CHECK_FPU_FEATURE(dc, VIS1);
4358 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4359 break;
4360 case 0x06a: /* VIS I fnot1 */
4361 CHECK_FPU_FEATURE(dc, VIS1);
4362 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4363 break;
4364 case 0x06b: /* VIS I fnot1s */
4365 CHECK_FPU_FEATURE(dc, VIS1);
4366 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4367 break;
4368 case 0x06c: /* VIS I fxor */
4369 CHECK_FPU_FEATURE(dc, VIS1);
4370 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4371 break;
4372 case 0x06d: /* VIS I fxors */
4373 CHECK_FPU_FEATURE(dc, VIS1);
4374 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4375 break;
4376 case 0x06e: /* VIS I fnand */
4377 CHECK_FPU_FEATURE(dc, VIS1);
4378 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4379 break;
4380 case 0x06f: /* VIS I fnands */
4381 CHECK_FPU_FEATURE(dc, VIS1);
4382 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4383 break;
4384 case 0x070: /* VIS I fand */
4385 CHECK_FPU_FEATURE(dc, VIS1);
4386 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4387 break;
4388 case 0x071: /* VIS I fands */
4389 CHECK_FPU_FEATURE(dc, VIS1);
4390 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4391 break;
4392 case 0x072: /* VIS I fxnor */
4393 CHECK_FPU_FEATURE(dc, VIS1);
4394 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4395 break;
4396 case 0x073: /* VIS I fxnors */
4397 CHECK_FPU_FEATURE(dc, VIS1);
4398 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4399 break;
4400 case 0x074: /* VIS I fsrc1 */
4401 CHECK_FPU_FEATURE(dc, VIS1);
4402 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4403 gen_store_fpr_D(dc, rd, cpu_src1_64);
4404 break;
4405 case 0x075: /* VIS I fsrc1s */
4406 CHECK_FPU_FEATURE(dc, VIS1);
4407 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4408 gen_store_fpr_F(dc, rd, cpu_src1_32);
4409 break;
4410 case 0x076: /* VIS I fornot2 */
4411 CHECK_FPU_FEATURE(dc, VIS1);
4412 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4413 break;
4414 case 0x077: /* VIS I fornot2s */
4415 CHECK_FPU_FEATURE(dc, VIS1);
4416 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4417 break;
4418 case 0x078: /* VIS I fsrc2 */
4419 CHECK_FPU_FEATURE(dc, VIS1);
4420 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4421 gen_store_fpr_D(dc, rd, cpu_src1_64);
4422 break;
4423 case 0x079: /* VIS I fsrc2s */
4424 CHECK_FPU_FEATURE(dc, VIS1);
4425 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4426 gen_store_fpr_F(dc, rd, cpu_src1_32);
4427 break;
4428 case 0x07a: /* VIS I fornot1 */
4429 CHECK_FPU_FEATURE(dc, VIS1);
4430 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4431 break;
4432 case 0x07b: /* VIS I fornot1s */
4433 CHECK_FPU_FEATURE(dc, VIS1);
4434 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4435 break;
4436 case 0x07c: /* VIS I for */
4437 CHECK_FPU_FEATURE(dc, VIS1);
4438 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4439 break;
4440 case 0x07d: /* VIS I fors */
4441 CHECK_FPU_FEATURE(dc, VIS1);
4442 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4443 break;
4444 case 0x07e: /* VIS I fone */
4445 CHECK_FPU_FEATURE(dc, VIS1);
4446 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4447 tcg_gen_movi_i64(cpu_dst_64, -1);
4448 gen_store_fpr_D(dc, rd, cpu_dst_64);
4449 break;
4450 case 0x07f: /* VIS I fones */
4451 CHECK_FPU_FEATURE(dc, VIS1);
4452 cpu_dst_32 = gen_dest_fpr_F(dc);
4453 tcg_gen_movi_i32(cpu_dst_32, -1);
4454 gen_store_fpr_F(dc, rd, cpu_dst_32);
4455 break;
4456 case 0x080: /* VIS I shutdown */
4457 case 0x081: /* VIS II siam */
4458 // XXX
4459 goto illegal_insn;
4460 default:
4461 goto illegal_insn;
4463 #else
4464 goto ncp_insn;
4465 #endif
4466 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4467 #ifdef TARGET_SPARC64
4468 goto illegal_insn;
4469 #else
4470 goto ncp_insn;
4471 #endif
4472 #ifdef TARGET_SPARC64
4473 } else if (xop == 0x39) { /* V9 return */
4474 TCGv_i32 r_const;
4476 save_state(dc);
4477 cpu_src1 = get_src1(dc, insn);
4478 cpu_tmp0 = get_temp_tl(dc);
4479 if (IS_IMM) { /* immediate */
4480 simm = GET_FIELDs(insn, 19, 31);
4481 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4482 } else { /* register */
4483 rs2 = GET_FIELD(insn, 27, 31);
4484 if (rs2) {
4485 cpu_src2 = gen_load_gpr(dc, rs2);
4486 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4487 } else {
4488 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4491 gen_helper_restore(cpu_env);
4492 gen_mov_pc_npc(dc);
4493 r_const = tcg_const_i32(3);
4494 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4495 tcg_temp_free_i32(r_const);
4496 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4497 dc->npc = DYNAMIC_PC;
4498 goto jmp_insn;
4499 #endif
4500 } else {
4501 cpu_src1 = get_src1(dc, insn);
4502 cpu_tmp0 = get_temp_tl(dc);
4503 if (IS_IMM) { /* immediate */
4504 simm = GET_FIELDs(insn, 19, 31);
4505 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4506 } else { /* register */
4507 rs2 = GET_FIELD(insn, 27, 31);
4508 if (rs2) {
4509 cpu_src2 = gen_load_gpr(dc, rs2);
4510 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4511 } else {
4512 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4515 switch (xop) {
4516 case 0x38: /* jmpl */
4518 TCGv t;
4519 TCGv_i32 r_const;
4521 t = gen_dest_gpr(dc, rd);
4522 tcg_gen_movi_tl(t, dc->pc);
4523 gen_store_gpr(dc, rd, t);
4524 gen_mov_pc_npc(dc);
4525 r_const = tcg_const_i32(3);
4526 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4527 tcg_temp_free_i32(r_const);
4528 gen_address_mask(dc, cpu_tmp0);
4529 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4530 dc->npc = DYNAMIC_PC;
4532 goto jmp_insn;
4533 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4534 case 0x39: /* rett, V9 return */
4536 TCGv_i32 r_const;
4538 if (!supervisor(dc))
4539 goto priv_insn;
4540 gen_mov_pc_npc(dc);
4541 r_const = tcg_const_i32(3);
4542 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4543 tcg_temp_free_i32(r_const);
4544 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4545 dc->npc = DYNAMIC_PC;
4546 gen_helper_rett(cpu_env);
4548 goto jmp_insn;
4549 #endif
4550 case 0x3b: /* flush */
4551 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4552 goto unimp_flush;
4553 /* nop */
4554 break;
4555 case 0x3c: /* save */
4556 save_state(dc);
4557 gen_helper_save(cpu_env);
4558 gen_store_gpr(dc, rd, cpu_tmp0);
4559 break;
4560 case 0x3d: /* restore */
4561 save_state(dc);
4562 gen_helper_restore(cpu_env);
4563 gen_store_gpr(dc, rd, cpu_tmp0);
4564 break;
4565 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4566 case 0x3e: /* V9 done/retry */
4568 switch (rd) {
4569 case 0:
4570 if (!supervisor(dc))
4571 goto priv_insn;
4572 dc->npc = DYNAMIC_PC;
4573 dc->pc = DYNAMIC_PC;
4574 gen_helper_done(cpu_env);
4575 goto jmp_insn;
4576 case 1:
4577 if (!supervisor(dc))
4578 goto priv_insn;
4579 dc->npc = DYNAMIC_PC;
4580 dc->pc = DYNAMIC_PC;
4581 gen_helper_retry(cpu_env);
4582 goto jmp_insn;
4583 default:
4584 goto illegal_insn;
4587 break;
4588 #endif
4589 default:
4590 goto illegal_insn;
4593 break;
4595 break;
4596 case 3: /* load/store instructions */
4598 unsigned int xop = GET_FIELD(insn, 7, 12);
4599 /* ??? gen_address_mask prevents us from using a source
4600 register directly. Always generate a temporary. */
4601 TCGv cpu_addr = get_temp_tl(dc);
4603 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4604 if (xop == 0x3c || xop == 0x3e) {
4605 /* V9 casa/casxa : no offset */
4606 } else if (IS_IMM) { /* immediate */
4607 simm = GET_FIELDs(insn, 19, 31);
4608 if (simm != 0) {
4609 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4611 } else { /* register */
4612 rs2 = GET_FIELD(insn, 27, 31);
4613 if (rs2 != 0) {
4614 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4617 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4618 (xop > 0x17 && xop <= 0x1d ) ||
4619 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4620 TCGv cpu_val = gen_dest_gpr(dc, rd);
4622 switch (xop) {
4623 case 0x0: /* ld, V9 lduw, load unsigned word */
4624 gen_address_mask(dc, cpu_addr);
4625 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4626 break;
4627 case 0x1: /* ldub, load unsigned byte */
4628 gen_address_mask(dc, cpu_addr);
4629 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4630 break;
4631 case 0x2: /* lduh, load unsigned halfword */
4632 gen_address_mask(dc, cpu_addr);
4633 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4634 break;
4635 case 0x3: /* ldd, load double word */
4636 if (rd & 1)
4637 goto illegal_insn;
4638 else {
4639 TCGv_i32 r_const;
4640 TCGv_i64 t64;
4642 save_state(dc);
4643 r_const = tcg_const_i32(7);
4644 /* XXX remove alignment check */
4645 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4646 tcg_temp_free_i32(r_const);
4647 gen_address_mask(dc, cpu_addr);
4648 t64 = tcg_temp_new_i64();
4649 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4650 tcg_gen_trunc_i64_tl(cpu_val, t64);
4651 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4652 gen_store_gpr(dc, rd + 1, cpu_val);
4653 tcg_gen_shri_i64(t64, t64, 32);
4654 tcg_gen_trunc_i64_tl(cpu_val, t64);
4655 tcg_temp_free_i64(t64);
4656 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4658 break;
4659 case 0x9: /* ldsb, load signed byte */
4660 gen_address_mask(dc, cpu_addr);
4661 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4662 break;
4663 case 0xa: /* ldsh, load signed halfword */
4664 gen_address_mask(dc, cpu_addr);
4665 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4666 break;
4667 case 0xd: /* ldstub -- XXX: should be atomically */
4669 TCGv r_const;
4671 gen_address_mask(dc, cpu_addr);
4672 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4673 r_const = tcg_const_tl(0xff);
4674 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4675 tcg_temp_free(r_const);
4677 break;
4678 case 0x0f:
4679 /* swap, swap register with memory. Also atomically */
4681 TCGv t0 = get_temp_tl(dc);
4682 CHECK_IU_FEATURE(dc, SWAP);
4683 cpu_src1 = gen_load_gpr(dc, rd);
4684 gen_address_mask(dc, cpu_addr);
4685 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4686 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4687 tcg_gen_mov_tl(cpu_val, t0);
4689 break;
4690 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4691 case 0x10: /* lda, V9 lduwa, load word alternate */
4692 #ifndef TARGET_SPARC64
4693 if (IS_IMM)
4694 goto illegal_insn;
4695 if (!supervisor(dc))
4696 goto priv_insn;
4697 #endif
4698 save_state(dc);
4699 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4700 break;
4701 case 0x11: /* lduba, load unsigned byte alternate */
4702 #ifndef TARGET_SPARC64
4703 if (IS_IMM)
4704 goto illegal_insn;
4705 if (!supervisor(dc))
4706 goto priv_insn;
4707 #endif
4708 save_state(dc);
4709 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4710 break;
4711 case 0x12: /* lduha, load unsigned halfword alternate */
4712 #ifndef TARGET_SPARC64
4713 if (IS_IMM)
4714 goto illegal_insn;
4715 if (!supervisor(dc))
4716 goto priv_insn;
4717 #endif
4718 save_state(dc);
4719 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4720 break;
4721 case 0x13: /* ldda, load double word alternate */
4722 #ifndef TARGET_SPARC64
4723 if (IS_IMM)
4724 goto illegal_insn;
4725 if (!supervisor(dc))
4726 goto priv_insn;
4727 #endif
4728 if (rd & 1)
4729 goto illegal_insn;
4730 save_state(dc);
4731 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4732 goto skip_move;
4733 case 0x19: /* ldsba, load signed byte alternate */
4734 #ifndef TARGET_SPARC64
4735 if (IS_IMM)
4736 goto illegal_insn;
4737 if (!supervisor(dc))
4738 goto priv_insn;
4739 #endif
4740 save_state(dc);
4741 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4742 break;
4743 case 0x1a: /* ldsha, load signed halfword alternate */
4744 #ifndef TARGET_SPARC64
4745 if (IS_IMM)
4746 goto illegal_insn;
4747 if (!supervisor(dc))
4748 goto priv_insn;
4749 #endif
4750 save_state(dc);
4751 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4752 break;
4753 case 0x1d: /* ldstuba -- XXX: should be atomically */
4754 #ifndef TARGET_SPARC64
4755 if (IS_IMM)
4756 goto illegal_insn;
4757 if (!supervisor(dc))
4758 goto priv_insn;
4759 #endif
4760 save_state(dc);
4761 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4762 break;
4763 case 0x1f: /* swapa, swap reg with alt. memory. Also
4764 atomically */
4765 CHECK_IU_FEATURE(dc, SWAP);
4766 #ifndef TARGET_SPARC64
4767 if (IS_IMM)
4768 goto illegal_insn;
4769 if (!supervisor(dc))
4770 goto priv_insn;
4771 #endif
4772 save_state(dc);
4773 cpu_src1 = gen_load_gpr(dc, rd);
4774 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4775 break;
4777 #ifndef TARGET_SPARC64
4778 case 0x30: /* ldc */
4779 case 0x31: /* ldcsr */
4780 case 0x33: /* lddc */
4781 goto ncp_insn;
4782 #endif
4783 #endif
4784 #ifdef TARGET_SPARC64
4785 case 0x08: /* V9 ldsw */
4786 gen_address_mask(dc, cpu_addr);
4787 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4788 break;
4789 case 0x0b: /* V9 ldx */
4790 gen_address_mask(dc, cpu_addr);
4791 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4792 break;
4793 case 0x18: /* V9 ldswa */
4794 save_state(dc);
4795 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4796 break;
4797 case 0x1b: /* V9 ldxa */
4798 save_state(dc);
4799 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4800 break;
4801 case 0x2d: /* V9 prefetch, no effect */
4802 goto skip_move;
4803 case 0x30: /* V9 ldfa */
4804 if (gen_trap_ifnofpu(dc)) {
4805 goto jmp_insn;
4807 save_state(dc);
4808 gen_ldf_asi(cpu_addr, insn, 4, rd);
4809 gen_update_fprs_dirty(rd);
4810 goto skip_move;
4811 case 0x33: /* V9 lddfa */
4812 if (gen_trap_ifnofpu(dc)) {
4813 goto jmp_insn;
4815 save_state(dc);
4816 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4817 gen_update_fprs_dirty(DFPREG(rd));
4818 goto skip_move;
4819 case 0x3d: /* V9 prefetcha, no effect */
4820 goto skip_move;
4821 case 0x32: /* V9 ldqfa */
4822 CHECK_FPU_FEATURE(dc, FLOAT128);
4823 if (gen_trap_ifnofpu(dc)) {
4824 goto jmp_insn;
4826 save_state(dc);
4827 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4828 gen_update_fprs_dirty(QFPREG(rd));
4829 goto skip_move;
4830 #endif
4831 default:
4832 goto illegal_insn;
4834 gen_store_gpr(dc, rd, cpu_val);
4835 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4836 skip_move: ;
4837 #endif
4838 } else if (xop >= 0x20 && xop < 0x24) {
4839 TCGv t0;
4841 if (gen_trap_ifnofpu(dc)) {
4842 goto jmp_insn;
4844 save_state(dc);
4845 switch (xop) {
4846 case 0x20: /* ldf, load fpreg */
4847 gen_address_mask(dc, cpu_addr);
4848 t0 = get_temp_tl(dc);
4849 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4850 cpu_dst_32 = gen_dest_fpr_F(dc);
4851 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4852 gen_store_fpr_F(dc, rd, cpu_dst_32);
4853 break;
4854 case 0x21: /* ldfsr, V9 ldxfsr */
4855 #ifdef TARGET_SPARC64
4856 gen_address_mask(dc, cpu_addr);
4857 if (rd == 1) {
4858 TCGv_i64 t64 = tcg_temp_new_i64();
4859 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4860 gen_helper_ldxfsr(cpu_env, t64);
4861 tcg_temp_free_i64(t64);
4862 break;
4864 #endif
4865 cpu_dst_32 = get_temp_i32(dc);
4866 t0 = get_temp_tl(dc);
4867 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4868 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4869 gen_helper_ldfsr(cpu_env, cpu_dst_32);
4870 break;
4871 case 0x22: /* ldqf, load quad fpreg */
4873 TCGv_i32 r_const;
4875 CHECK_FPU_FEATURE(dc, FLOAT128);
4876 r_const = tcg_const_i32(dc->mem_idx);
4877 gen_address_mask(dc, cpu_addr);
4878 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4879 tcg_temp_free_i32(r_const);
4880 gen_op_store_QT0_fpr(QFPREG(rd));
4881 gen_update_fprs_dirty(QFPREG(rd));
4883 break;
4884 case 0x23: /* lddf, load double fpreg */
4885 gen_address_mask(dc, cpu_addr);
4886 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4887 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4888 gen_store_fpr_D(dc, rd, cpu_dst_64);
4889 break;
4890 default:
4891 goto illegal_insn;
4893 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4894 xop == 0xe || xop == 0x1e) {
4895 TCGv cpu_val = gen_load_gpr(dc, rd);
4897 switch (xop) {
4898 case 0x4: /* st, store word */
4899 gen_address_mask(dc, cpu_addr);
4900 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4901 break;
4902 case 0x5: /* stb, store byte */
4903 gen_address_mask(dc, cpu_addr);
4904 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4905 break;
4906 case 0x6: /* sth, store halfword */
4907 gen_address_mask(dc, cpu_addr);
4908 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4909 break;
4910 case 0x7: /* std, store double word */
4911 if (rd & 1)
4912 goto illegal_insn;
4913 else {
4914 TCGv_i32 r_const;
4915 TCGv_i64 t64;
4916 TCGv lo;
4918 save_state(dc);
4919 gen_address_mask(dc, cpu_addr);
4920 r_const = tcg_const_i32(7);
4921 /* XXX remove alignment check */
4922 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4923 tcg_temp_free_i32(r_const);
4924 lo = gen_load_gpr(dc, rd + 1);
4926 t64 = tcg_temp_new_i64();
4927 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4928 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4929 tcg_temp_free_i64(t64);
4931 break;
4932 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4933 case 0x14: /* sta, V9 stwa, store word alternate */
4934 #ifndef TARGET_SPARC64
4935 if (IS_IMM)
4936 goto illegal_insn;
4937 if (!supervisor(dc))
4938 goto priv_insn;
4939 #endif
4940 save_state(dc);
4941 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4942 dc->npc = DYNAMIC_PC;
4943 break;
4944 case 0x15: /* stba, store byte alternate */
4945 #ifndef TARGET_SPARC64
4946 if (IS_IMM)
4947 goto illegal_insn;
4948 if (!supervisor(dc))
4949 goto priv_insn;
4950 #endif
4951 save_state(dc);
4952 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4953 dc->npc = DYNAMIC_PC;
4954 break;
4955 case 0x16: /* stha, store halfword alternate */
4956 #ifndef TARGET_SPARC64
4957 if (IS_IMM)
4958 goto illegal_insn;
4959 if (!supervisor(dc))
4960 goto priv_insn;
4961 #endif
4962 save_state(dc);
4963 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4964 dc->npc = DYNAMIC_PC;
4965 break;
4966 case 0x17: /* stda, store double word alternate */
4967 #ifndef TARGET_SPARC64
4968 if (IS_IMM)
4969 goto illegal_insn;
4970 if (!supervisor(dc))
4971 goto priv_insn;
4972 #endif
4973 if (rd & 1)
4974 goto illegal_insn;
4975 else {
4976 save_state(dc);
4977 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4979 break;
4980 #endif
4981 #ifdef TARGET_SPARC64
4982 case 0x0e: /* V9 stx */
4983 gen_address_mask(dc, cpu_addr);
4984 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4985 break;
4986 case 0x1e: /* V9 stxa */
4987 save_state(dc);
4988 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4989 dc->npc = DYNAMIC_PC;
4990 break;
4991 #endif
4992 default:
4993 goto illegal_insn;
4995 } else if (xop > 0x23 && xop < 0x28) {
4996 if (gen_trap_ifnofpu(dc)) {
4997 goto jmp_insn;
4999 save_state(dc);
5000 switch (xop) {
5001 case 0x24: /* stf, store fpreg */
5003 TCGv t = get_temp_tl(dc);
5004 gen_address_mask(dc, cpu_addr);
5005 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5006 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5007 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5009 break;
5010 case 0x25: /* stfsr, V9 stxfsr */
5012 TCGv t = get_temp_tl(dc);
5014 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5015 #ifdef TARGET_SPARC64
5016 gen_address_mask(dc, cpu_addr);
5017 if (rd == 1) {
5018 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5019 break;
5021 #endif
5022 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5024 break;
5025 case 0x26:
5026 #ifdef TARGET_SPARC64
5027 /* V9 stqf, store quad fpreg */
5029 TCGv_i32 r_const;
5031 CHECK_FPU_FEATURE(dc, FLOAT128);
5032 gen_op_load_fpr_QT0(QFPREG(rd));
5033 r_const = tcg_const_i32(dc->mem_idx);
5034 gen_address_mask(dc, cpu_addr);
5035 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5036 tcg_temp_free_i32(r_const);
5038 break;
5039 #else /* !TARGET_SPARC64 */
5040 /* stdfq, store floating point queue */
5041 #if defined(CONFIG_USER_ONLY)
5042 goto illegal_insn;
5043 #else
5044 if (!supervisor(dc))
5045 goto priv_insn;
5046 if (gen_trap_ifnofpu(dc)) {
5047 goto jmp_insn;
5049 goto nfq_insn;
5050 #endif
5051 #endif
5052 case 0x27: /* stdf, store double fpreg */
5053 gen_address_mask(dc, cpu_addr);
5054 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5055 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5056 break;
5057 default:
5058 goto illegal_insn;
5060 } else if (xop > 0x33 && xop < 0x3f) {
5061 save_state(dc);
5062 switch (xop) {
5063 #ifdef TARGET_SPARC64
5064 case 0x34: /* V9 stfa */
5065 if (gen_trap_ifnofpu(dc)) {
5066 goto jmp_insn;
5068 gen_stf_asi(cpu_addr, insn, 4, rd);
5069 break;
5070 case 0x36: /* V9 stqfa */
5072 TCGv_i32 r_const;
5074 CHECK_FPU_FEATURE(dc, FLOAT128);
5075 if (gen_trap_ifnofpu(dc)) {
5076 goto jmp_insn;
5078 r_const = tcg_const_i32(7);
5079 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5080 tcg_temp_free_i32(r_const);
5081 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5083 break;
5084 case 0x37: /* V9 stdfa */
5085 if (gen_trap_ifnofpu(dc)) {
5086 goto jmp_insn;
5088 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5089 break;
5090 case 0x3e: /* V9 casxa */
5091 rs2 = GET_FIELD(insn, 27, 31);
5092 cpu_src2 = gen_load_gpr(dc, rs2);
5093 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5094 break;
5095 #else
5096 case 0x34: /* stc */
5097 case 0x35: /* stcsr */
5098 case 0x36: /* stdcq */
5099 case 0x37: /* stdc */
5100 goto ncp_insn;
5101 #endif
5102 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5103 case 0x3c: /* V9 or LEON3 casa */
5104 #ifndef TARGET_SPARC64
5105 CHECK_IU_FEATURE(dc, CASA);
5106 if (IS_IMM) {
5107 goto illegal_insn;
5109 /* LEON3 allows CASA from user space with ASI 0xa */
5110 if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
5111 goto priv_insn;
5113 #endif
5114 rs2 = GET_FIELD(insn, 27, 31);
5115 cpu_src2 = gen_load_gpr(dc, rs2);
5116 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5117 break;
5118 #endif
5119 default:
5120 goto illegal_insn;
5122 } else {
5123 goto illegal_insn;
5126 break;
5128 /* default case for non jump instructions */
5129 if (dc->npc == DYNAMIC_PC) {
5130 dc->pc = DYNAMIC_PC;
5131 gen_op_next_insn();
5132 } else if (dc->npc == JUMP_PC) {
5133 /* we can do a static jump */
5134 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5135 dc->is_br = 1;
5136 } else {
5137 dc->pc = dc->npc;
5138 dc->npc = dc->npc + 4;
5140 jmp_insn:
5141 goto egress;
5142 illegal_insn:
5144 TCGv_i32 r_const;
5146 save_state(dc);
5147 r_const = tcg_const_i32(TT_ILL_INSN);
5148 gen_helper_raise_exception(cpu_env, r_const);
5149 tcg_temp_free_i32(r_const);
5150 dc->is_br = 1;
5152 goto egress;
5153 unimp_flush:
5155 TCGv_i32 r_const;
5157 save_state(dc);
5158 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5159 gen_helper_raise_exception(cpu_env, r_const);
5160 tcg_temp_free_i32(r_const);
5161 dc->is_br = 1;
5163 goto egress;
5164 #if !defined(CONFIG_USER_ONLY)
5165 priv_insn:
5167 TCGv_i32 r_const;
5169 save_state(dc);
5170 r_const = tcg_const_i32(TT_PRIV_INSN);
5171 gen_helper_raise_exception(cpu_env, r_const);
5172 tcg_temp_free_i32(r_const);
5173 dc->is_br = 1;
5175 goto egress;
5176 #endif
5177 nfpu_insn:
5178 save_state(dc);
5179 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5180 dc->is_br = 1;
5181 goto egress;
5182 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5183 nfq_insn:
5184 save_state(dc);
5185 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5186 dc->is_br = 1;
5187 goto egress;
5188 #endif
5189 #ifndef TARGET_SPARC64
5190 ncp_insn:
5192 TCGv r_const;
5194 save_state(dc);
5195 r_const = tcg_const_i32(TT_NCP_INSN);
5196 gen_helper_raise_exception(cpu_env, r_const);
5197 tcg_temp_free(r_const);
5198 dc->is_br = 1;
5200 goto egress;
5201 #endif
5202 egress:
5203 if (dc->n_t32 != 0) {
5204 int i;
5205 for (i = dc->n_t32 - 1; i >= 0; --i) {
5206 tcg_temp_free_i32(dc->t32[i]);
5208 dc->n_t32 = 0;
5210 if (dc->n_ttl != 0) {
5211 int i;
5212 for (i = dc->n_ttl - 1; i >= 0; --i) {
5213 tcg_temp_free(dc->ttl[i]);
5215 dc->n_ttl = 0;
/*
 * Translate a block of SPARC guest code into TCG ops.
 *
 * Fills @tb with the generated intermediate code for the guest code
 * starting at tb->pc, stopping at a control-flow change, a page
 * boundary, the instruction budget, or a full TCG op buffer.
 * On return, tb->size and tb->icount describe the translated region.
 */
5219 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5221 SPARCCPU *cpu = sparc_env_get_cpu(env);
5222 CPUState *cs = CPU(cpu);
5223 target_ulong pc_start, last_pc;
5224 DisasContext dc1, *dc = &dc1;
5225 int num_insns;
5226 int max_insns;
5227 unsigned int insn;
/* Set up the per-TB disassembly context from the CPU state and TB flags. */
5229 memset(dc, 0, sizeof(DisasContext));
5230 dc->tb = tb;
5231 pc_start = tb->pc;
5232 dc->pc = pc_start;
5233 last_pc = dc->pc;
/* SPARC has delay slots: npc travels with the TB via cs_base. */
5234 dc->npc = (target_ulong) tb->cs_base;
5235 dc->cc_op = CC_OP_DYNAMIC;
5236 dc->mem_idx = cpu_mmu_index(env, false);
5237 dc->def = env->def;
5238 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5239 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5240 dc->singlestep = (cs->singlestep_enabled || singlestep);
/* Cap the number of guest insns per TB (CF_COUNT_MASK, TCG_MAX_INSNS). */
5242 num_insns = 0;
5243 max_insns = tb->cflags & CF_COUNT_MASK;
5244 if (max_insns == 0) {
5245 max_insns = CF_COUNT_MASK;
5247 if (max_insns > TCG_MAX_INSNS) {
5248 max_insns = TCG_MAX_INSNS;
5251 gen_tb_start(tb);
/* Main translation loop: one guest instruction per iteration. */
5252 do {
/* Record (pc, npc) for this insn so exceptions can restore state;
   a conditional-delay-slot npc is encoded as jump target | JUMP_PC. */
5253 if (dc->npc & JUMP_PC) {
5254 assert(dc->jump_pc[1] == dc->pc + 4);
5255 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5256 } else {
5257 tcg_gen_insn_start(dc->pc, dc->npc);
5259 num_insns++;
5260 last_pc = dc->pc;
/* Emit a debug exception at breakpoints and end the TB there. */
5262 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5263 if (dc->pc != pc_start) {
5264 save_state(dc);
5266 gen_helper_debug(cpu_env);
5267 tcg_gen_exit_tb(0);
5268 dc->is_br = 1;
5269 goto exit_gen_loop;
/* If the last insn of an icount TB may do I/O, bracket it with
   gen_io_start()/gen_io_end(). */
5272 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5273 gen_io_start();
/* Fetch and translate one 32-bit instruction word. */
5276 insn = cpu_ldl_code(env, dc->pc);
5278 disas_sparc_insn(dc, insn);
5280 if (dc->is_br)
5281 break;
5282 /* if the next PC is different, we abort now */
5283 if (dc->pc != (last_pc + 4))
5284 break;
5285 /* if we reach a page boundary, we stop generation so that the
5286 PC of a TT_TFAULT exception is always in the right page */
5287 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5288 break;
5289 /* if single step mode, we generate only one instruction and
5290 generate an exception */
5291 if (dc->singlestep) {
5292 break;
5294 } while (!tcg_op_buf_full() &&
5295 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5296 num_insns < max_insns);
5298 exit_gen_loop:
5299 if (tb->cflags & CF_LAST_IO) {
5300 gen_io_end();
/* If the TB did not end in a branch, synthesize the exit: chain
   directly when both pc and npc are static, otherwise fall back to
   storing pc/npc and exiting to the main loop. */
5302 if (!dc->is_br) {
5303 if (dc->pc != DYNAMIC_PC &&
5304 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5305 /* static PC and NPC: we can use direct chaining */
5306 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5307 } else {
5308 if (dc->pc != DYNAMIC_PC) {
5309 tcg_gen_movi_tl(cpu_pc, dc->pc);
5311 save_npc(dc);
5312 tcg_gen_exit_tb(0);
5315 gen_tb_end(tb, num_insns);
/* Publish the final extent and instruction count of the TB. */
5317 tb->size = last_pc + 4 - pc_start;
5318 tb->icount = num_insns;
/* Optionally log the guest disassembly of the translated region. */
5320 #ifdef DEBUG_DISAS
5321 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5322 qemu_log("--------------\n");
5323 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5324 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5325 qemu_log("\n");
5327 #endif
/*
 * One-time initialization of the SPARC translator's TCG globals.
 *
 * Creates the TCG global variables that mirror CPUSPARCState fields
 * (condition codes, pc/npc, y, fsr, windowed GPRs, FP registers, and
 * the SPARC64- or SPARC32-specific registers).  Idempotent: guarded by
 * the 'inited' flag so repeated calls are no-ops.
 */
5330 void gen_intermediate_code_init(CPUSPARCState *env)
5332 static int inited;
/* Display names for the 32 windowed integer registers g/o/l/i 0-7. */
5333 static const char gregnames[32][4] = {
5334 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5335 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5336 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5337 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
/* FP registers are modeled as 64-bit pairs, hence even names only. */
5339 static const char fregnames[32][4] = {
5340 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5341 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5342 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5343 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
/* Table of 32-bit globals: target pointer, env offset, debug name. */
5346 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5347 #ifdef TARGET_SPARC64
5348 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5349 { &cpu_asi, offsetof(CPUSPARCState, asi), "asi" },
5350 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5351 { &cpu_softint, offsetof(CPUSPARCState, softint), "softint" },
5352 #else
5353 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5354 #endif
5355 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5356 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
/* Table of target_ulong-sized globals, laid out like r32[] above. */
5359 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5360 #ifdef TARGET_SPARC64
5361 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5362 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5363 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5364 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5365 "hstick_cmpr" },
5366 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5367 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5368 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5369 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5370 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5371 #endif
5372 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5373 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5374 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5375 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5376 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5377 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5378 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5379 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5380 #ifndef CONFIG_USER_ONLY
5381 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5382 #endif
5385 unsigned int i;
5387 /* init various static tables */
5388 if (inited) {
5389 return;
5391 inited = 1;
/* env pointer lives in a fixed host register; regwptr points at the
   current register window inside env. */
5393 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5395 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5396 offsetof(CPUSPARCState, regwptr),
5397 "regwptr");
/* Instantiate all table-driven globals declared above. */
5399 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5400 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5403 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5404 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
/* %g0 is hardwired to zero and gets no backing storage. */
5407 TCGV_UNUSED(cpu_regs[0]);
/* %g1-%g7 live directly in env->gregs[]. */
5408 for (i = 1; i < 8; ++i) {
5409 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5410 offsetof(CPUSPARCState, gregs[i]),
5411 gregnames[i]);
/* %o/%l/%i registers are addressed through the window pointer. */
5414 for (i = 8; i < 32; ++i) {
5415 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5416 (i - 8) * sizeof(target_ulong),
5417 gregnames[i]);
/* FP register file, one 64-bit global per double-precision pair. */
5420 for (i = 0; i < TARGET_DPREGS; i++) {
5421 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5422 offsetof(CPUSPARCState, fpr[i]),
5423 fregnames[i]);
5427 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5428 target_ulong *data)
5430 target_ulong pc = data[0];
5431 target_ulong npc = data[1];
5433 env->pc = pc;
5434 if (npc == DYNAMIC_PC) {
5435 /* dynamic NPC: already stored */
5436 } else if (npc & JUMP_PC) {
5437 /* jump PC: use 'cond' and the jump targets of the translation */
5438 if (env->cond) {
5439 env->npc = npc & ~3;
5440 } else {
5441 env->npc = pc + 4;
5443 } else {
5444 env->npc = npc;