/* qemu.git: target-sparc/translate.c */
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;
typedef struct {
    TCGCond cond;
    bool is_bool;
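    /* When g1/g2 is set, c1/c2 alias global TCG values (e.g. cpu_cc_dst)
       and must not be freed; free_compare() below releases only the
       non-global temporaries. */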
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
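/* Example: GET_FIELD(insn, 0, 1) extracts the two most significant bits of
   the instruction word (the SPARC "op" field, bits 31:30); the same bits
   would be GET_FIELD_SP(insn, 30, 31) in the manual's bit-0-is-LSB
   numbering. */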
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
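/* Example: on SPARC64 the low bit of the encoded register number supplies
   bit 5 of the architectural register, so DFPREG(1) == 32 (%d32) while
   DFPREG(2) == 2 (%d2). */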
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
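/* Example: sign_extend(0x1fff, 13) == -1, since bit 12 is the sign bit of a
   13-bit field. */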
#define IS_IMM (insn & (1<<13))
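/* IS_IMM tests bit 13, the "i" field of format-3 instructions: when set,
   the second operand is an immediate (simm13) rather than register rs2;
   see get_src2() below. */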
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}

/* floating point registers moves */
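/* Single-precision registers are packed two per TCGv_i64 element of
   cpu_fpr[]: the even-numbered register of each pair lives in the high
   32 bits and the odd-numbered one in the low 32 bits, as the load/store
   helpers below show. */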
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
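/* Note: tb_num (0 or 1) names which of the TB's two direct-jump slots is
   used; returning (uintptr_t)tb + tb_num from the generated code lets the
   execution loop patch that slot so the two TBs chain without returning to
   the dispatcher. */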
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    t = gen_dest_gpr(dc, rd | 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd | 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
2321 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2322 int width, bool cc, bool left)
2324 TCGv lo1, lo2, t1, t2;
2325 uint64_t amask, tabl, tabr;
2326 int shift, imask, omask;
2328 if (cc) {
2329 tcg_gen_mov_tl(cpu_cc_src, s1);
2330 tcg_gen_mov_tl(cpu_cc_src2, s2);
2331 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2332 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2333 dc->cc_op = CC_OP_SUB;
2336 /* Theory of operation: there are two tables, left and right (not to
2337 be confused with the left and right versions of the opcode). These
2338 are indexed by the low 3 bits of the inputs. To make things "easy",
2339 these tables are loaded into two constants, TABL and TABR below.
2340 The operation index = (input & imask) << shift calculates the index
2341 into the constant, while val = (table >> index) & omask calculates
2342 the value we're looking for. */
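/* Worked example (editor's addition, not in the original source): in the
   width == 8, left-edge case below, an input whose low three bits are
   0b101 gives index = 5 << 3 = 40, and (0x80c0e0f0f8fcfeffULL >> 40) & 0xff
   == 0xe0, i.e. the sixth byte of TABL counted from the least significant
   end. */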
2343 switch (width) {
2344 case 8:
2345 imask = 0x7;
2346 shift = 3;
2347 omask = 0xff;
2348 if (left) {
2349 tabl = 0x80c0e0f0f8fcfeffULL;
2350 tabr = 0xff7f3f1f0f070301ULL;
2351 } else {
2352 tabl = 0x0103070f1f3f7fffULL;
2353 tabr = 0xfffefcf8f0e0c080ULL;
2355 break;
2356 case 16:
2357 imask = 0x6;
2358 shift = 1;
2359 omask = 0xf;
2360 if (left) {
2361 tabl = 0x8cef;
2362 tabr = 0xf731;
2363 } else {
2364 tabl = 0x137f;
2365 tabr = 0xfec8;
2367 break;
2368 case 32:
2369 imask = 0x4;
2370 shift = 0;
2371 omask = 0x3;
2372 if (left) {
2373 tabl = (2 << 2) | 3;
2374 tabr = (3 << 2) | 1;
2375 } else {
2376 tabl = (1 << 2) | 3;
2377 tabr = (3 << 2) | 2;
2379 break;
2380 default:
2381 abort();
2384 lo1 = tcg_temp_new();
2385 lo2 = tcg_temp_new();
2386 tcg_gen_andi_tl(lo1, s1, imask);
2387 tcg_gen_andi_tl(lo2, s2, imask);
2388 tcg_gen_shli_tl(lo1, lo1, shift);
2389 tcg_gen_shli_tl(lo2, lo2, shift);
2391 t1 = tcg_const_tl(tabl);
2392 t2 = tcg_const_tl(tabr);
2393 tcg_gen_shr_tl(lo1, t1, lo1);
2394 tcg_gen_shr_tl(lo2, t2, lo2);
2395 tcg_gen_andi_tl(dst, lo1, omask);
2396 tcg_gen_andi_tl(lo2, lo2, omask);
2398 amask = -8;
2399 if (AM_CHECK(dc)) {
2400 amask &= 0xffffffffULL;
2402 tcg_gen_andi_tl(s1, s1, amask);
2403 tcg_gen_andi_tl(s2, s2, amask);
2405 /* We want to compute
2406 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2407 We've already done dst = lo1, so this reduces to
2408 dst &= (s1 == s2 ? -1 : lo2)
2409 Which we perform by
2410 lo2 |= -(s1 == s2)
2411 dst &= lo2
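/* Illustrative note (editor's addition): setcond yields 0 or 1, so after
   the neg below t1 is either 0 (s1 != s2: the OR leaves lo2 unchanged and
   dst becomes lo1 & lo2) or all-ones (s1 == s2: lo2 is forced to -1 and
   dst keeps lo1). */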
2413 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2414 tcg_gen_neg_tl(t1, t1);
2415 tcg_gen_or_tl(lo2, lo2, t1);
2416 tcg_gen_and_tl(dst, dst, lo2);
2418 tcg_temp_free(lo1);
2419 tcg_temp_free(lo2);
2420 tcg_temp_free(t1);
2421 tcg_temp_free(t2);
2424 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2426 TCGv tmp = tcg_temp_new();
2428 tcg_gen_add_tl(tmp, s1, s2);
2429 tcg_gen_andi_tl(dst, tmp, -8);
2430 if (left) {
2431 tcg_gen_neg_tl(tmp, tmp);
2433 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
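/* Note (editor's addition, assuming VIS semantics): GSR.align thus ends up
   holding (s1 + s2) & 7 for alignaddr, or its negation modulo 8 for
   alignaddrl; gen_faligndata below consumes those low three bits as the
   byte offset. */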
2435 tcg_temp_free(tmp);
2438 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2440 TCGv t1, t2, shift;
2442 t1 = tcg_temp_new();
2443 t2 = tcg_temp_new();
2444 shift = tcg_temp_new();
2446 tcg_gen_andi_tl(shift, gsr, 7);
2447 tcg_gen_shli_tl(shift, shift, 3);
2448 tcg_gen_shl_tl(t1, s1, shift);
2450 /* A shift of 64 is undefined in TCG and need not produce 0. Split it
2451 into a shift of (up to 63) followed by a constant shift of 1. */
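/* Worked example (editor's addition): with GSR.align == 3 the shift is 24,
   so t2 == s2 >> (24 ^ 63) >> 1 == s2 >> 40 == s2 >> (64 - 24); with
   GSR.align == 0 this degenerates to s2 >> 63 >> 1 == 0, the result a
   single 64-bit shift could not reliably deliver. */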
2452 tcg_gen_xori_tl(shift, shift, 63);
2453 tcg_gen_shr_tl(t2, s2, shift);
2454 tcg_gen_shri_tl(t2, t2, 1);
2456 tcg_gen_or_tl(dst, t1, t2);
2458 tcg_temp_free(t1);
2459 tcg_temp_free(t2);
2460 tcg_temp_free(shift);
2462 #endif
2464 #define CHECK_IU_FEATURE(dc, FEATURE) \
2465 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2466 goto illegal_insn;
2467 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2468 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2469 goto nfpu_insn;
2471 /* before an instruction, dc->pc must be static */
2472 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2474 unsigned int opc, rs1, rs2, rd;
2475 TCGv cpu_src1, cpu_src2;
2476 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2477 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2478 target_long simm;
2480 opc = GET_FIELD(insn, 0, 1);
2481 rd = GET_FIELD(insn, 2, 6);
2483 switch (opc) {
2484 case 0: /* branches/sethi */
2486 unsigned int xop = GET_FIELD(insn, 7, 9);
2487 int32_t target;
2488 switch (xop) {
2489 #ifdef TARGET_SPARC64
2490 case 0x1: /* V9 BPcc */
2492 int cc;
2494 target = GET_FIELD_SP(insn, 0, 18);
2495 target = sign_extend(target, 19);
2496 target <<= 2;
2497 cc = GET_FIELD_SP(insn, 20, 21);
2498 if (cc == 0)
2499 do_branch(dc, target, insn, 0);
2500 else if (cc == 2)
2501 do_branch(dc, target, insn, 1);
2502 else
2503 goto illegal_insn;
2504 goto jmp_insn;
2506 case 0x3: /* V9 BPr */
2508 target = GET_FIELD_SP(insn, 0, 13) |
2509 (GET_FIELD_SP(insn, 20, 21) << 14);
2510 target = sign_extend(target, 16);
2511 target <<= 2;
2512 cpu_src1 = get_src1(dc, insn);
2513 do_branch_reg(dc, target, insn, cpu_src1);
2514 goto jmp_insn;
2516 case 0x5: /* V9 FBPcc */
2518 int cc = GET_FIELD_SP(insn, 20, 21);
2519 if (gen_trap_ifnofpu(dc)) {
2520 goto jmp_insn;
2522 target = GET_FIELD_SP(insn, 0, 18);
2523 target = sign_extend(target, 19);
2524 target <<= 2;
2525 do_fbranch(dc, target, insn, cc);
2526 goto jmp_insn;
2528 #else
2529 case 0x7: /* CBN+x */
2531 goto ncp_insn;
2533 #endif
2534 case 0x2: /* BN+x */
2536 target = GET_FIELD(insn, 10, 31);
2537 target = sign_extend(target, 22);
2538 target <<= 2;
2539 do_branch(dc, target, insn, 0);
2540 goto jmp_insn;
2542 case 0x6: /* FBN+x */
2544 if (gen_trap_ifnofpu(dc)) {
2545 goto jmp_insn;
2547 target = GET_FIELD(insn, 10, 31);
2548 target = sign_extend(target, 22);
2549 target <<= 2;
2550 do_fbranch(dc, target, insn, 0);
2551 goto jmp_insn;
2553 case 0x4: /* SETHI */
2554 /* Special-case %g0 because that's the canonical nop. */
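/* Illustrative example (editor's addition): "sethi %hi(0x40000000), %o0"
   carries imm22 == 0x100000, so the value stored below is
   0x100000 << 10 == 0x40000000; with rd == %g0 and imm22 == 0 this is the
   canonical nop encoding, 0x01000000. */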
2555 if (rd) {
2556 uint32_t value = GET_FIELD(insn, 10, 31);
2557 TCGv t = gen_dest_gpr(dc, rd);
2558 tcg_gen_movi_tl(t, value << 10);
2559 gen_store_gpr(dc, rd, t);
2561 break;
2562 case 0x0: /* UNIMPL */
2563 default:
2564 goto illegal_insn;
2566 break;
2568 break;
2569 case 1: /* CALL */
2571 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2572 TCGv o7 = gen_dest_gpr(dc, 15);
2574 tcg_gen_movi_tl(o7, dc->pc);
2575 gen_store_gpr(dc, 15, o7);
2576 target += dc->pc;
2577 gen_mov_pc_npc(dc);
2578 #ifdef TARGET_SPARC64
2579 if (unlikely(AM_CHECK(dc))) {
2580 target &= 0xffffffffULL;
2582 #endif
2583 dc->npc = target;
2585 goto jmp_insn;
2586 case 2: /* FPU & Logical Operations */
2588 unsigned int xop = GET_FIELD(insn, 7, 12);
2589 TCGv cpu_dst = get_temp_tl(dc);
2590 TCGv cpu_tmp0;
2592 if (xop == 0x3a) { /* generate trap */
2593 int cond = GET_FIELD(insn, 3, 6);
2594 TCGv_i32 trap;
2595 TCGLabel *l1 = NULL;
2596 int mask;
2598 if (cond == 0) {
2599 /* Trap never. */
2600 break;
2603 save_state(dc);
2605 if (cond != 8) {
2606 /* Conditional trap. */
2607 DisasCompare cmp;
2608 #ifdef TARGET_SPARC64
2609 /* V9 icc/xcc */
2610 int cc = GET_FIELD_SP(insn, 11, 12);
2611 if (cc == 0) {
2612 gen_compare(&cmp, 0, cond, dc);
2613 } else if (cc == 2) {
2614 gen_compare(&cmp, 1, cond, dc);
2615 } else {
2616 goto illegal_insn;
2618 #else
2619 gen_compare(&cmp, 0, cond, dc);
2620 #endif
2621 l1 = gen_new_label();
2622 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2623 cmp.c1, cmp.c2, l1);
2624 free_compare(&cmp);
2627 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2628 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2630 /* Don't use the normal temporaries, as they may well have
2631 gone out of scope with the branch above. While we're at it,
2632 we might as well pre-truncate to 32 bits. */
2633 trap = tcg_temp_new_i32();
2635 rs1 = GET_FIELD_SP(insn, 14, 18);
2636 if (IS_IMM) {
2637 rs2 = GET_FIELD_SP(insn, 0, 6);
2638 if (rs1 == 0) {
2639 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2640 /* Signal that the trap value is fully constant. */
2641 mask = 0;
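/* Example (editor's addition; assumes QEMU's TT_TRAP software-trap base):
   "ta 0x10", the Linux sparc32 syscall trap, has rs1 == %g0 and an
   immediate of 0x10, so it takes this constant path and raises
   0x10 + TT_TRAP. */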
2642 } else {
2643 TCGv t1 = gen_load_gpr(dc, rs1);
2644 tcg_gen_trunc_tl_i32(trap, t1);
2645 tcg_gen_addi_i32(trap, trap, rs2);
2647 } else {
2648 TCGv t1, t2;
2649 rs2 = GET_FIELD_SP(insn, 0, 4);
2650 t1 = gen_load_gpr(dc, rs1);
2651 t2 = gen_load_gpr(dc, rs2);
2652 tcg_gen_add_tl(t1, t1, t2);
2653 tcg_gen_trunc_tl_i32(trap, t1);
2655 if (mask != 0) {
2656 tcg_gen_andi_i32(trap, trap, mask);
2657 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2660 gen_helper_raise_exception(cpu_env, trap);
2661 tcg_temp_free_i32(trap);
2663 if (cond == 8) {
2664 /* An unconditional trap ends the TB. */
2665 dc->is_br = 1;
2666 goto jmp_insn;
2667 } else {
2668 /* A conditional trap falls through to the next insn. */
2669 gen_set_label(l1);
2670 break;
2672 } else if (xop == 0x28) { /* rdy / rdasr */
2673 rs1 = GET_FIELD(insn, 13, 17);
2674 switch(rs1) {
2675 case 0: /* rdy */
2676 #ifndef TARGET_SPARC64
2677 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2678 manual, rdy on the microSPARC
2679 II */
2680 case 0x0f: /* stbar in the SPARCv8 manual,
2681 rdy on the microSPARC II */
2682 case 0x10 ... 0x1f: /* implementation-dependent in the
2683 SPARCv8 manual, rdy on the
2684 microSPARC II */
2685 /* Read Asr17 */
2686 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2687 TCGv t = gen_dest_gpr(dc, rd);
2688 /* Read Asr17 for a Leon3 monoprocessor */
2689 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2690 gen_store_gpr(dc, rd, t);
2691 break;
2693 #endif
2694 gen_store_gpr(dc, rd, cpu_y);
2695 break;
2696 #ifdef TARGET_SPARC64
2697 case 0x2: /* V9 rdccr */
2698 update_psr(dc);
2699 gen_helper_rdccr(cpu_dst, cpu_env);
2700 gen_store_gpr(dc, rd, cpu_dst);
2701 break;
2702 case 0x3: /* V9 rdasi */
2703 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2704 gen_store_gpr(dc, rd, cpu_dst);
2705 break;
2706 case 0x4: /* V9 rdtick */
2708 TCGv_ptr r_tickptr;
2709 TCGv_i32 r_const;
2711 r_tickptr = tcg_temp_new_ptr();
2712 r_const = tcg_const_i32(dc->mem_idx);
2713 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2714 offsetof(CPUSPARCState, tick));
2715 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2716 r_const);
2717 tcg_temp_free_ptr(r_tickptr);
2718 tcg_temp_free_i32(r_const);
2719 gen_store_gpr(dc, rd, cpu_dst);
2721 break;
2722 case 0x5: /* V9 rdpc */
2724 TCGv t = gen_dest_gpr(dc, rd);
2725 if (unlikely(AM_CHECK(dc))) {
2726 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2727 } else {
2728 tcg_gen_movi_tl(t, dc->pc);
2730 gen_store_gpr(dc, rd, t);
2732 break;
2733 case 0x6: /* V9 rdfprs */
2734 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2735 gen_store_gpr(dc, rd, cpu_dst);
2736 break;
2737 case 0xf: /* V9 membar */
2738 break; /* no effect */
2739 case 0x13: /* Graphics Status */
2740 if (gen_trap_ifnofpu(dc)) {
2741 goto jmp_insn;
2743 gen_store_gpr(dc, rd, cpu_gsr);
2744 break;
2745 case 0x16: /* Softint */
2746 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2747 gen_store_gpr(dc, rd, cpu_dst);
2748 break;
2749 case 0x17: /* Tick compare */
2750 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2751 break;
2752 case 0x18: /* System tick */
2754 TCGv_ptr r_tickptr;
2755 TCGv_i32 r_const;
2757 r_tickptr = tcg_temp_new_ptr();
2758 r_const = tcg_const_i32(dc->mem_idx);
2759 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2760 offsetof(CPUSPARCState, stick));
2761 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2762 r_const);
2763 tcg_temp_free_ptr(r_tickptr);
2764 tcg_temp_free_i32(r_const);
2765 gen_store_gpr(dc, rd, cpu_dst);
2767 break;
2768 case 0x19: /* System tick compare */
2769 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2770 break;
2771 case 0x10: /* Performance Control */
2772 case 0x11: /* Performance Instrumentation Counter */
2773 case 0x12: /* Dispatch Control */
2774 case 0x14: /* Softint set, WO */
2775 case 0x15: /* Softint clear, WO */
2776 #endif
2777 default:
2778 goto illegal_insn;
2780 #if !defined(CONFIG_USER_ONLY)
2781 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2782 #ifndef TARGET_SPARC64
2783 if (!supervisor(dc)) {
2784 goto priv_insn;
2786 update_psr(dc);
2787 gen_helper_rdpsr(cpu_dst, cpu_env);
2788 #else
2789 CHECK_IU_FEATURE(dc, HYPV);
2790 if (!hypervisor(dc))
2791 goto priv_insn;
2792 rs1 = GET_FIELD(insn, 13, 17);
2793 switch (rs1) {
2794 case 0: // hpstate
2795 // gen_op_rdhpstate();
2796 break;
2797 case 1: // htstate
2798 // gen_op_rdhtstate();
2799 break;
2800 case 3: // hintp
2801 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2802 break;
2803 case 5: // htba
2804 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2805 break;
2806 case 6: // hver
2807 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2808 break;
2809 case 31: // hstick_cmpr
2810 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2811 break;
2812 default:
2813 goto illegal_insn;
2815 #endif
2816 gen_store_gpr(dc, rd, cpu_dst);
2817 break;
2818 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2819 if (!supervisor(dc)) {
2820 goto priv_insn;
2822 cpu_tmp0 = get_temp_tl(dc);
2823 #ifdef TARGET_SPARC64
2824 rs1 = GET_FIELD(insn, 13, 17);
2825 switch (rs1) {
2826 case 0: // tpc
2828 TCGv_ptr r_tsptr;
2830 r_tsptr = tcg_temp_new_ptr();
2831 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2832 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2833 offsetof(trap_state, tpc));
2834 tcg_temp_free_ptr(r_tsptr);
2836 break;
2837 case 1: // tnpc
2839 TCGv_ptr r_tsptr;
2841 r_tsptr = tcg_temp_new_ptr();
2842 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2843 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2844 offsetof(trap_state, tnpc));
2845 tcg_temp_free_ptr(r_tsptr);
2847 break;
2848 case 2: // tstate
2850 TCGv_ptr r_tsptr;
2852 r_tsptr = tcg_temp_new_ptr();
2853 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2854 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2855 offsetof(trap_state, tstate));
2856 tcg_temp_free_ptr(r_tsptr);
2858 break;
2859 case 3: // tt
2861 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2863 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2864 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2865 offsetof(trap_state, tt));
2866 tcg_temp_free_ptr(r_tsptr);
2868 break;
2869 case 4: // tick
2871 TCGv_ptr r_tickptr;
2872 TCGv_i32 r_const;
2874 r_tickptr = tcg_temp_new_ptr();
2875 r_const = tcg_const_i32(dc->mem_idx);
2876 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2877 offsetof(CPUSPARCState, tick));
2878 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2879 r_tickptr, r_const);
2880 tcg_temp_free_ptr(r_tickptr);
2881 tcg_temp_free_i32(r_const);
2883 break;
2884 case 5: // tba
2885 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2886 break;
2887 case 6: // pstate
2888 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2889 offsetof(CPUSPARCState, pstate));
2890 break;
2891 case 7: // tl
2892 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2893 offsetof(CPUSPARCState, tl));
2894 break;
2895 case 8: // pil
2896 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2897 offsetof(CPUSPARCState, psrpil));
2898 break;
2899 case 9: // cwp
2900 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2901 break;
2902 case 10: // cansave
2903 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2904 offsetof(CPUSPARCState, cansave));
2905 break;
2906 case 11: // canrestore
2907 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2908 offsetof(CPUSPARCState, canrestore));
2909 break;
2910 case 12: // cleanwin
2911 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2912 offsetof(CPUSPARCState, cleanwin));
2913 break;
2914 case 13: // otherwin
2915 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2916 offsetof(CPUSPARCState, otherwin));
2917 break;
2918 case 14: // wstate
2919 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2920 offsetof(CPUSPARCState, wstate));
2921 break;
2922 case 16: // UA2005 gl
2923 CHECK_IU_FEATURE(dc, GL);
2924 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2925 offsetof(CPUSPARCState, gl));
2926 break;
2927 case 26: // UA2005 strand status
2928 CHECK_IU_FEATURE(dc, HYPV);
2929 if (!hypervisor(dc))
2930 goto priv_insn;
2931 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2932 break;
2933 case 31: // ver
2934 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2935 break;
2936 case 15: // fq
2937 default:
2938 goto illegal_insn;
2940 #else
2941 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2942 #endif
2943 gen_store_gpr(dc, rd, cpu_tmp0);
2944 break;
2945 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2946 #ifdef TARGET_SPARC64
2947 save_state(dc);
2948 gen_helper_flushw(cpu_env);
2949 #else
2950 if (!supervisor(dc))
2951 goto priv_insn;
2952 gen_store_gpr(dc, rd, cpu_tbr);
2953 #endif
2954 break;
2955 #endif
2956 } else if (xop == 0x34) { /* FPop1: FPU operations */
2957 if (gen_trap_ifnofpu(dc)) {
2958 goto jmp_insn;
2960 gen_op_clear_ieee_excp_and_FTT();
2961 rs1 = GET_FIELD(insn, 13, 17);
2962 rs2 = GET_FIELD(insn, 27, 31);
2963 xop = GET_FIELD(insn, 18, 26);
2964 save_state(dc);
2965 switch (xop) {
2966 case 0x1: /* fmovs */
2967 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2968 gen_store_fpr_F(dc, rd, cpu_src1_32);
2969 break;
2970 case 0x5: /* fnegs */
2971 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2972 break;
2973 case 0x9: /* fabss */
2974 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2975 break;
2976 case 0x29: /* fsqrts */
2977 CHECK_FPU_FEATURE(dc, FSQRT);
2978 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2979 break;
2980 case 0x2a: /* fsqrtd */
2981 CHECK_FPU_FEATURE(dc, FSQRT);
2982 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2983 break;
2984 case 0x2b: /* fsqrtq */
2985 CHECK_FPU_FEATURE(dc, FLOAT128);
2986 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2987 break;
2988 case 0x41: /* fadds */
2989 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2990 break;
2991 case 0x42: /* faddd */
2992 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2993 break;
2994 case 0x43: /* faddq */
2995 CHECK_FPU_FEATURE(dc, FLOAT128);
2996 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2997 break;
2998 case 0x45: /* fsubs */
2999 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3000 break;
3001 case 0x46: /* fsubd */
3002 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3003 break;
3004 case 0x47: /* fsubq */
3005 CHECK_FPU_FEATURE(dc, FLOAT128);
3006 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3007 break;
3008 case 0x49: /* fmuls */
3009 CHECK_FPU_FEATURE(dc, FMUL);
3010 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3011 break;
3012 case 0x4a: /* fmuld */
3013 CHECK_FPU_FEATURE(dc, FMUL);
3014 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3015 break;
3016 case 0x4b: /* fmulq */
3017 CHECK_FPU_FEATURE(dc, FLOAT128);
3018 CHECK_FPU_FEATURE(dc, FMUL);
3019 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3020 break;
3021 case 0x4d: /* fdivs */
3022 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3023 break;
3024 case 0x4e: /* fdivd */
3025 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3026 break;
3027 case 0x4f: /* fdivq */
3028 CHECK_FPU_FEATURE(dc, FLOAT128);
3029 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3030 break;
3031 case 0x69: /* fsmuld */
3032 CHECK_FPU_FEATURE(dc, FSMULD);
3033 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3034 break;
3035 case 0x6e: /* fdmulq */
3036 CHECK_FPU_FEATURE(dc, FLOAT128);
3037 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3038 break;
3039 case 0xc4: /* fitos */
3040 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3041 break;
3042 case 0xc6: /* fdtos */
3043 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3044 break;
3045 case 0xc7: /* fqtos */
3046 CHECK_FPU_FEATURE(dc, FLOAT128);
3047 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3048 break;
3049 case 0xc8: /* fitod */
3050 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3051 break;
3052 case 0xc9: /* fstod */
3053 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3054 break;
3055 case 0xcb: /* fqtod */
3056 CHECK_FPU_FEATURE(dc, FLOAT128);
3057 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3058 break;
3059 case 0xcc: /* fitoq */
3060 CHECK_FPU_FEATURE(dc, FLOAT128);
3061 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3062 break;
3063 case 0xcd: /* fstoq */
3064 CHECK_FPU_FEATURE(dc, FLOAT128);
3065 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3066 break;
3067 case 0xce: /* fdtoq */
3068 CHECK_FPU_FEATURE(dc, FLOAT128);
3069 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3070 break;
3071 case 0xd1: /* fstoi */
3072 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3073 break;
3074 case 0xd2: /* fdtoi */
3075 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3076 break;
3077 case 0xd3: /* fqtoi */
3078 CHECK_FPU_FEATURE(dc, FLOAT128);
3079 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3080 break;
3081 #ifdef TARGET_SPARC64
3082 case 0x2: /* V9 fmovd */
3083 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3084 gen_store_fpr_D(dc, rd, cpu_src1_64);
3085 break;
3086 case 0x3: /* V9 fmovq */
3087 CHECK_FPU_FEATURE(dc, FLOAT128);
3088 gen_move_Q(rd, rs2);
3089 break;
3090 case 0x6: /* V9 fnegd */
3091 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3092 break;
3093 case 0x7: /* V9 fnegq */
3094 CHECK_FPU_FEATURE(dc, FLOAT128);
3095 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3096 break;
3097 case 0xa: /* V9 fabsd */
3098 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3099 break;
3100 case 0xb: /* V9 fabsq */
3101 CHECK_FPU_FEATURE(dc, FLOAT128);
3102 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3103 break;
3104 case 0x81: /* V9 fstox */
3105 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3106 break;
3107 case 0x82: /* V9 fdtox */
3108 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3109 break;
3110 case 0x83: /* V9 fqtox */
3111 CHECK_FPU_FEATURE(dc, FLOAT128);
3112 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3113 break;
3114 case 0x84: /* V9 fxtos */
3115 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3116 break;
3117 case 0x88: /* V9 fxtod */
3118 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3119 break;
3120 case 0x8c: /* V9 fxtoq */
3121 CHECK_FPU_FEATURE(dc, FLOAT128);
3122 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3123 break;
3124 #endif
3125 default:
3126 goto illegal_insn;
3128 } else if (xop == 0x35) { /* FPop2: FP compares and conditional moves */
3129 #ifdef TARGET_SPARC64
3130 int cond;
3131 #endif
3132 if (gen_trap_ifnofpu(dc)) {
3133 goto jmp_insn;
3135 gen_op_clear_ieee_excp_and_FTT();
3136 rs1 = GET_FIELD(insn, 13, 17);
3137 rs2 = GET_FIELD(insn, 27, 31);
3138 xop = GET_FIELD(insn, 18, 26);
3139 save_state(dc);
3141 #ifdef TARGET_SPARC64
3142 #define FMOVR(sz) \
3143 do { \
3144 DisasCompare cmp; \
3145 cond = GET_FIELD_SP(insn, 10, 12); \
3146 cpu_src1 = get_src1(dc, insn); \
3147 gen_compare_reg(&cmp, cond, cpu_src1); \
3148 gen_fmov##sz(dc, &cmp, rd, rs2); \
3149 free_compare(&cmp); \
3150 } while (0)
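/* Illustrative note (editor's addition): FMOVR(d), for instance, builds a
   register-against-zero DisasCompare from rs1 and hands it to gen_fmovd,
   i.e. the V9 FMOVRcond family: move an FP register if an integer
   register satisfies cond. */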
3152 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3153 FMOVR(s);
3154 break;
3155 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3156 FMOVR(d);
3157 break;
3158 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3159 CHECK_FPU_FEATURE(dc, FLOAT128);
3160 FMOVR(q);
3161 break;
3163 #undef FMOVR
3164 #endif
3165 switch (xop) {
3166 #ifdef TARGET_SPARC64
3167 #define FMOVCC(fcc, sz) \
3168 do { \
3169 DisasCompare cmp; \
3170 cond = GET_FIELD_SP(insn, 14, 17); \
3171 gen_fcompare(&cmp, fcc, cond); \
3172 gen_fmov##sz(dc, &cmp, rd, rs2); \
3173 free_compare(&cmp); \
3174 } while (0)
3176 case 0x001: /* V9 fmovscc %fcc0 */
3177 FMOVCC(0, s);
3178 break;
3179 case 0x002: /* V9 fmovdcc %fcc0 */
3180 FMOVCC(0, d);
3181 break;
3182 case 0x003: /* V9 fmovqcc %fcc0 */
3183 CHECK_FPU_FEATURE(dc, FLOAT128);
3184 FMOVCC(0, q);
3185 break;
3186 case 0x041: /* V9 fmovscc %fcc1 */
3187 FMOVCC(1, s);
3188 break;
3189 case 0x042: /* V9 fmovdcc %fcc1 */
3190 FMOVCC(1, d);
3191 break;
3192 case 0x043: /* V9 fmovqcc %fcc1 */
3193 CHECK_FPU_FEATURE(dc, FLOAT128);
3194 FMOVCC(1, q);
3195 break;
3196 case 0x081: /* V9 fmovscc %fcc2 */
3197 FMOVCC(2, s);
3198 break;
3199 case 0x082: /* V9 fmovdcc %fcc2 */
3200 FMOVCC(2, d);
3201 break;
3202 case 0x083: /* V9 fmovqcc %fcc2 */
3203 CHECK_FPU_FEATURE(dc, FLOAT128);
3204 FMOVCC(2, q);
3205 break;
3206 case 0x0c1: /* V9 fmovscc %fcc3 */
3207 FMOVCC(3, s);
3208 break;
3209 case 0x0c2: /* V9 fmovdcc %fcc3 */
3210 FMOVCC(3, d);
3211 break;
3212 case 0x0c3: /* V9 fmovqcc %fcc3 */
3213 CHECK_FPU_FEATURE(dc, FLOAT128);
3214 FMOVCC(3, q);
3215 break;
3216 #undef FMOVCC
3217 #define FMOVCC(xcc, sz) \
3218 do { \
3219 DisasCompare cmp; \
3220 cond = GET_FIELD_SP(insn, 14, 17); \
3221 gen_compare(&cmp, xcc, cond, dc); \
3222 gen_fmov##sz(dc, &cmp, rd, rs2); \
3223 free_compare(&cmp); \
3224 } while (0)
3226 case 0x101: /* V9 fmovscc %icc */
3227 FMOVCC(0, s);
3228 break;
3229 case 0x102: /* V9 fmovdcc %icc */
3230 FMOVCC(0, d);
3231 break;
3232 case 0x103: /* V9 fmovqcc %icc */
3233 CHECK_FPU_FEATURE(dc, FLOAT128);
3234 FMOVCC(0, q);
3235 break;
3236 case 0x181: /* V9 fmovscc %xcc */
3237 FMOVCC(1, s);
3238 break;
3239 case 0x182: /* V9 fmovdcc %xcc */
3240 FMOVCC(1, d);
3241 break;
3242 case 0x183: /* V9 fmovqcc %xcc */
3243 CHECK_FPU_FEATURE(dc, FLOAT128);
3244 FMOVCC(1, q);
3245 break;
3246 #undef FMOVCC
3247 #endif
3248 case 0x51: /* fcmps, V9 %fcc */
3249 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3250 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3251 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3252 break;
3253 case 0x52: /* fcmpd, V9 %fcc */
3254 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3255 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3256 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3257 break;
3258 case 0x53: /* fcmpq, V9 %fcc */
3259 CHECK_FPU_FEATURE(dc, FLOAT128);
3260 gen_op_load_fpr_QT0(QFPREG(rs1));
3261 gen_op_load_fpr_QT1(QFPREG(rs2));
3262 gen_op_fcmpq(rd & 3);
3263 break;
3264 case 0x55: /* fcmpes, V9 %fcc */
3265 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3266 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3267 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3268 break;
3269 case 0x56: /* fcmped, V9 %fcc */
3270 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3271 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3272 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3273 break;
3274 case 0x57: /* fcmpeq, V9 %fcc */
3275 CHECK_FPU_FEATURE(dc, FLOAT128);
3276 gen_op_load_fpr_QT0(QFPREG(rs1));
3277 gen_op_load_fpr_QT1(QFPREG(rs2));
3278 gen_op_fcmpeq(rd & 3);
3279 break;
3280 default:
3281 goto illegal_insn;
3283 } else if (xop == 0x2) { /* or (handles clr/mov shortcuts) */
3284 TCGv dst = gen_dest_gpr(dc, rd);
3285 rs1 = GET_FIELD(insn, 13, 17);
3286 if (rs1 == 0) {
3287 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3288 if (IS_IMM) { /* immediate */
3289 simm = GET_FIELDs(insn, 19, 31);
3290 tcg_gen_movi_tl(dst, simm);
3291 gen_store_gpr(dc, rd, dst);
3292 } else { /* register */
3293 rs2 = GET_FIELD(insn, 27, 31);
3294 if (rs2 == 0) {
3295 tcg_gen_movi_tl(dst, 0);
3296 gen_store_gpr(dc, rd, dst);
3297 } else {
3298 cpu_src2 = gen_load_gpr(dc, rs2);
3299 gen_store_gpr(dc, rd, cpu_src2);
3302 } else {
3303 cpu_src1 = get_src1(dc, insn);
3304 if (IS_IMM) { /* immediate */
3305 simm = GET_FIELDs(insn, 19, 31);
3306 tcg_gen_ori_tl(dst, cpu_src1, simm);
3307 gen_store_gpr(dc, rd, dst);
3308 } else { /* register */
3309 rs2 = GET_FIELD(insn, 27, 31);
3310 if (rs2 == 0) {
3311 /* mov shortcut: or x, %g0, y -> mov x, y */
3312 gen_store_gpr(dc, rd, cpu_src1);
3313 } else {
3314 cpu_src2 = gen_load_gpr(dc, rs2);
3315 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3316 gen_store_gpr(dc, rd, dst);
3320 #ifdef TARGET_SPARC64
3321 } else if (xop == 0x25) { /* sll, V9 sllx */
3322 cpu_src1 = get_src1(dc, insn);
3323 if (IS_IMM) { /* immediate */
3324 simm = GET_FIELDs(insn, 20, 31);
3325 if (insn & (1 << 12)) {
3326 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3327 } else {
3328 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3330 } else { /* register */
3331 rs2 = GET_FIELD(insn, 27, 31);
3332 cpu_src2 = gen_load_gpr(dc, rs2);
3333 cpu_tmp0 = get_temp_tl(dc);
3334 if (insn & (1 << 12)) {
3335 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3336 } else {
3337 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3339 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3341 gen_store_gpr(dc, rd, cpu_dst);
3342 } else if (xop == 0x26) { /* srl, V9 srlx */
3343 cpu_src1 = get_src1(dc, insn);
3344 if (IS_IMM) { /* immediate */
3345 simm = GET_FIELDs(insn, 20, 31);
3346 if (insn & (1 << 12)) {
3347 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3348 } else {
3349 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3350 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3352 } else { /* register */
3353 rs2 = GET_FIELD(insn, 27, 31);
3354 cpu_src2 = gen_load_gpr(dc, rs2);
3355 cpu_tmp0 = get_temp_tl(dc);
3356 if (insn & (1 << 12)) {
3357 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3358 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3359 } else {
3360 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3361 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3362 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3365 gen_store_gpr(dc, rd, cpu_dst);
3366 } else if (xop == 0x27) { /* sra, V9 srax */
3367 cpu_src1 = get_src1(dc, insn);
3368 if (IS_IMM) { /* immediate */
3369 simm = GET_FIELDs(insn, 20, 31);
3370 if (insn & (1 << 12)) {
3371 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3372 } else {
3373 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3374 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3376 } else { /* register */
3377 rs2 = GET_FIELD(insn, 27, 31);
3378 cpu_src2 = gen_load_gpr(dc, rs2);
3379 cpu_tmp0 = get_temp_tl(dc);
3380 if (insn & (1 << 12)) {
3381 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3382 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3383 } else {
3384 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3385 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3386 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3389 gen_store_gpr(dc, rd, cpu_dst);
3390 #endif
3391 } else if (xop < 0x36) {
3392 if (xop < 0x20) {
3393 cpu_src1 = get_src1(dc, insn);
3394 cpu_src2 = get_src2(dc, insn);
3395 switch (xop & ~0x10) {
3396 case 0x0: /* add */
3397 if (xop & 0x10) {
3398 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3399 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3400 dc->cc_op = CC_OP_ADD;
3401 } else {
3402 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3404 break;
3405 case 0x1: /* and */
3406 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3407 if (xop & 0x10) {
3408 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3409 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3410 dc->cc_op = CC_OP_LOGIC;
3412 break;
3413 case 0x2: /* or */
3414 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3415 if (xop & 0x10) {
3416 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3417 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3418 dc->cc_op = CC_OP_LOGIC;
3420 break;
3421 case 0x3: /* xor */
3422 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3423 if (xop & 0x10) {
3424 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3425 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3426 dc->cc_op = CC_OP_LOGIC;
3428 break;
3429 case 0x4: /* sub */
3430 if (xop & 0x10) {
3431 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3432 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3433 dc->cc_op = CC_OP_SUB;
3434 } else {
3435 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3437 break;
3438 case 0x5: /* andn */
3439 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3440 if (xop & 0x10) {
3441 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3442 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3443 dc->cc_op = CC_OP_LOGIC;
3445 break;
3446 case 0x6: /* orn */
3447 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3448 if (xop & 0x10) {
3449 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3450 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3451 dc->cc_op = CC_OP_LOGIC;
3453 break;
3454 case 0x7: /* xorn */
3455 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3456 if (xop & 0x10) {
3457 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3458 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3459 dc->cc_op = CC_OP_LOGIC;
3461 break;
3462 case 0x8: /* addx, V9 addc */
3463 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3464 (xop & 0x10));
3465 break;
3466 #ifdef TARGET_SPARC64
3467 case 0x9: /* V9 mulx */
3468 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3469 break;
3470 #endif
3471 case 0xa: /* umul */
3472 CHECK_IU_FEATURE(dc, MUL);
3473 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3474 if (xop & 0x10) {
3475 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3476 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3477 dc->cc_op = CC_OP_LOGIC;
3479 break;
3480 case 0xb: /* smul */
3481 CHECK_IU_FEATURE(dc, MUL);
3482 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3483 if (xop & 0x10) {
3484 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3485 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3486 dc->cc_op = CC_OP_LOGIC;
3488 break;
3489 case 0xc: /* subx, V9 subc */
3490 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3491 (xop & 0x10));
3492 break;
3493 #ifdef TARGET_SPARC64
3494 case 0xd: /* V9 udivx */
3495 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3496 break;
3497 #endif
3498 case 0xe: /* udiv */
3499 CHECK_IU_FEATURE(dc, DIV);
3500 if (xop & 0x10) {
3501 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3502 cpu_src2);
3503 dc->cc_op = CC_OP_DIV;
3504 } else {
3505 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3506 cpu_src2);
3508 break;
3509 case 0xf: /* sdiv */
3510 CHECK_IU_FEATURE(dc, DIV);
3511 if (xop & 0x10) {
3512 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3513 cpu_src2);
3514 dc->cc_op = CC_OP_DIV;
3515 } else {
3516 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3517 cpu_src2);
3519 break;
3520 default:
3521 goto illegal_insn;
3523 gen_store_gpr(dc, rd, cpu_dst);
3524 } else {
3525 cpu_src1 = get_src1(dc, insn);
3526 cpu_src2 = get_src2(dc, insn);
3527 switch (xop) {
3528 case 0x20: /* taddcc */
3529 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3530 gen_store_gpr(dc, rd, cpu_dst);
3531 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3532 dc->cc_op = CC_OP_TADD;
3533 break;
3534 case 0x21: /* tsubcc */
3535 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3536 gen_store_gpr(dc, rd, cpu_dst);
3537 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3538 dc->cc_op = CC_OP_TSUB;
3539 break;
3540 case 0x22: /* taddcctv */
3541 gen_helper_taddcctv(cpu_dst, cpu_env,
3542 cpu_src1, cpu_src2);
3543 gen_store_gpr(dc, rd, cpu_dst);
3544 dc->cc_op = CC_OP_TADDTV;
3545 break;
3546 case 0x23: /* tsubcctv */
3547 gen_helper_tsubcctv(cpu_dst, cpu_env,
3548 cpu_src1, cpu_src2);
3549 gen_store_gpr(dc, rd, cpu_dst);
3550 dc->cc_op = CC_OP_TSUBTV;
3551 break;
3552 case 0x24: /* mulscc */
3553 update_psr(dc);
3554 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3555 gen_store_gpr(dc, rd, cpu_dst);
3556 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3557 dc->cc_op = CC_OP_ADD;
3558 break;
3559 #ifndef TARGET_SPARC64
3560 case 0x25: /* sll */
3561 if (IS_IMM) { /* immediate */
3562 simm = GET_FIELDs(insn, 20, 31);
3563 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3564 } else { /* register */
3565 cpu_tmp0 = get_temp_tl(dc);
3566 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3567 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3569 gen_store_gpr(dc, rd, cpu_dst);
3570 break;
3571 case 0x26: /* srl */
3572 if (IS_IMM) { /* immediate */
3573 simm = GET_FIELDs(insn, 20, 31);
3574 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3575 } else { /* register */
3576 cpu_tmp0 = get_temp_tl(dc);
3577 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3578 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3580 gen_store_gpr(dc, rd, cpu_dst);
3581 break;
3582 case 0x27: /* sra */
3583 if (IS_IMM) { /* immediate */
3584 simm = GET_FIELDs(insn, 20, 31);
3585 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3586 } else { /* register */
3587 cpu_tmp0 = get_temp_tl(dc);
3588 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3589 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3591 gen_store_gpr(dc, rd, cpu_dst);
3592 break;
3593 #endif
3594 case 0x30: /* wry / wrasr */
3596 cpu_tmp0 = get_temp_tl(dc);
3597 switch(rd) {
3598 case 0: /* wry */
3599 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3600 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3601 break;
3602 #ifndef TARGET_SPARC64
3603 case 0x01 ... 0x0f: /* undefined in the
3604 SPARCv8 manual, nop
3605 on the microSPARC
3606 II */
3607 case 0x10 ... 0x1f: /* implementation-dependent
3608 in the SPARCv8
3609 manual, nop on the
3610 microSPARC II */
3611 if ((rd == 0x13) && (dc->def->features &
3612 CPU_FEATURE_POWERDOWN)) {
3613 /* LEON3 power-down */
3614 save_state(dc);
3615 gen_helper_power_down(cpu_env);
3617 break;
3618 #else
3619 case 0x2: /* V9 wrccr */
3620 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3621 gen_helper_wrccr(cpu_env, cpu_tmp0);
3622 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3623 dc->cc_op = CC_OP_FLAGS;
3624 break;
3625 case 0x3: /* V9 wrasi */
3626 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3627 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3628 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3629 break;
3630 case 0x6: /* V9 wrfprs */
3631 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3632 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3633 save_state(dc);
3634 gen_op_next_insn();
3635 tcg_gen_exit_tb(0);
3636 dc->is_br = 1;
3637 break;
3638 case 0xf: /* V9 sir, nop if user */
3639 #if !defined(CONFIG_USER_ONLY)
3640 if (supervisor(dc)) {
3641 ; // XXX
3643 #endif
3644 break;
3645 case 0x13: /* Graphics Status */
3646 if (gen_trap_ifnofpu(dc)) {
3647 goto jmp_insn;
3649 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3650 break;
3651 case 0x14: /* Softint set */
3652 if (!supervisor(dc))
3653 goto illegal_insn;
3654 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3655 gen_helper_set_softint(cpu_env, cpu_tmp0);
3656 break;
3657 case 0x15: /* Softint clear */
3658 if (!supervisor(dc))
3659 goto illegal_insn;
3660 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3661 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3662 break;
3663 case 0x16: /* Softint write */
3664 if (!supervisor(dc))
3665 goto illegal_insn;
3666 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3667 gen_helper_write_softint(cpu_env, cpu_tmp0);
3668 break;
3669 case 0x17: /* Tick compare */
3670 #if !defined(CONFIG_USER_ONLY)
3671 if (!supervisor(dc))
3672 goto illegal_insn;
3673 #endif
3675 TCGv_ptr r_tickptr;
3677 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3678 cpu_src2);
3679 r_tickptr = tcg_temp_new_ptr();
3680 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3681 offsetof(CPUSPARCState, tick));
3682 gen_helper_tick_set_limit(r_tickptr,
3683 cpu_tick_cmpr);
3684 tcg_temp_free_ptr(r_tickptr);
3686 break;
3687 case 0x18: /* System tick */
3688 #if !defined(CONFIG_USER_ONLY)
3689 if (!supervisor(dc))
3690 goto illegal_insn;
3691 #endif
3693 TCGv_ptr r_tickptr;
3695 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3696 cpu_src2);
3697 r_tickptr = tcg_temp_new_ptr();
3698 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3699 offsetof(CPUSPARCState, stick));
3700 gen_helper_tick_set_count(r_tickptr,
3701 cpu_tmp0);
3702 tcg_temp_free_ptr(r_tickptr);
3704 break;
3705 case 0x19: /* System tick compare */
3706 #if !defined(CONFIG_USER_ONLY)
3707 if (!supervisor(dc))
3708 goto illegal_insn;
3709 #endif
3711 TCGv_ptr r_tickptr;
3713 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3714 cpu_src2);
3715 r_tickptr = tcg_temp_new_ptr();
3716 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3717 offsetof(CPUSPARCState, stick));
3718 gen_helper_tick_set_limit(r_tickptr,
3719 cpu_stick_cmpr);
3720 tcg_temp_free_ptr(r_tickptr);
3722 break;
3724 case 0x10: /* Performance Control */
3725 case 0x11: /* Performance Instrumentation
3726 Counter */
3727 case 0x12: /* Dispatch Control */
3728 #endif
3729 default:
3730 goto illegal_insn;
3733 break;
3734 #if !defined(CONFIG_USER_ONLY)
3735 case 0x31: /* wrpsr, V9 saved, restored */
3737 if (!supervisor(dc))
3738 goto priv_insn;
3739 #ifdef TARGET_SPARC64
3740 switch (rd) {
3741 case 0:
3742 gen_helper_saved(cpu_env);
3743 break;
3744 case 1:
3745 gen_helper_restored(cpu_env);
3746 break;
3747 case 2: /* UA2005 allclean */
3748 case 3: /* UA2005 otherw */
3749 case 4: /* UA2005 normalw */
3750 case 5: /* UA2005 invalw */
3751 // XXX
3752 default:
3753 goto illegal_insn;
3755 #else
3756 cpu_tmp0 = get_temp_tl(dc);
3757 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3758 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3759 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3760 dc->cc_op = CC_OP_FLAGS;
3761 save_state(dc);
3762 gen_op_next_insn();
3763 tcg_gen_exit_tb(0);
3764 dc->is_br = 1;
3765 #endif
3767 break;
3768 case 0x32: /* wrwim, V9 wrpr */
3770 if (!supervisor(dc))
3771 goto priv_insn;
3772 cpu_tmp0 = get_temp_tl(dc);
3773 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3774 #ifdef TARGET_SPARC64
3775 switch (rd) {
3776 case 0: // tpc
3778 TCGv_ptr r_tsptr;
3780 r_tsptr = tcg_temp_new_ptr();
3781 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3782 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3783 offsetof(trap_state, tpc));
3784 tcg_temp_free_ptr(r_tsptr);
3786 break;
3787 case 1: // tnpc
3789 TCGv_ptr r_tsptr;
3791 r_tsptr = tcg_temp_new_ptr();
3792 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3793 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3794 offsetof(trap_state, tnpc));
3795 tcg_temp_free_ptr(r_tsptr);
3797 break;
3798 case 2: // tstate
3800 TCGv_ptr r_tsptr;
3802 r_tsptr = tcg_temp_new_ptr();
3803 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3804 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3805 offsetof(trap_state,
3806 tstate));
3807 tcg_temp_free_ptr(r_tsptr);
3809 break;
3810 case 3: // tt
3812 TCGv_ptr r_tsptr;
3814 r_tsptr = tcg_temp_new_ptr();
3815 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3816 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3817 offsetof(trap_state, tt));
3818 tcg_temp_free_ptr(r_tsptr);
3820 break;
3821 case 4: // tick
3823 TCGv_ptr r_tickptr;
3825 r_tickptr = tcg_temp_new_ptr();
3826 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3827 offsetof(CPUSPARCState, tick));
3828 gen_helper_tick_set_count(r_tickptr,
3829 cpu_tmp0);
3830 tcg_temp_free_ptr(r_tickptr);
3832 break;
3833 case 5: // tba
3834 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3835 break;
3836 case 6: // pstate
3837 save_state(dc);
3838 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3839 dc->npc = DYNAMIC_PC;
3840 break;
3841 case 7: // tl
3842 save_state(dc);
3843 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3844 offsetof(CPUSPARCState, tl));
3845 dc->npc = DYNAMIC_PC;
3846 break;
3847 case 8: // pil
3848 gen_helper_wrpil(cpu_env, cpu_tmp0);
3849 break;
3850 case 9: // cwp
3851 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3852 break;
3853 case 10: // cansave
3854 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3855 offsetof(CPUSPARCState,
3856 cansave));
3857 break;
3858 case 11: // canrestore
3859 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3860 offsetof(CPUSPARCState,
3861 canrestore));
3862 break;
3863 case 12: // cleanwin
3864 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3865 offsetof(CPUSPARCState,
3866 cleanwin));
3867 break;
3868 case 13: // otherwin
3869 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3870 offsetof(CPUSPARCState,
3871 otherwin));
3872 break;
3873 case 14: // wstate
3874 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3875 offsetof(CPUSPARCState,
3876 wstate));
3877 break;
3878 case 16: // UA2005 gl
3879 CHECK_IU_FEATURE(dc, GL);
3880 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3881 offsetof(CPUSPARCState, gl));
3882 break;
3883 case 26: // UA2005 strand status
3884 CHECK_IU_FEATURE(dc, HYPV);
3885 if (!hypervisor(dc))
3886 goto priv_insn;
3887 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3888 break;
3889 default:
3890 goto illegal_insn;
3892 #else
3893 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3894 if (dc->def->nwindows != 32) {
3895 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3896 (1 << dc->def->nwindows) - 1);
3898 #endif
3900 break;
3901 case 0x33: /* wrtbr, UA2005 wrhpr */
3903 #ifndef TARGET_SPARC64
3904 if (!supervisor(dc))
3905 goto priv_insn;
3906 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3907 #else
3908 CHECK_IU_FEATURE(dc, HYPV);
3909 if (!hypervisor(dc))
3910 goto priv_insn;
3911 cpu_tmp0 = get_temp_tl(dc);
3912 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3913 switch (rd) {
3914 case 0: // hpstate
3915 // XXX gen_op_wrhpstate();
3916 save_state(dc);
3917 gen_op_next_insn();
3918 tcg_gen_exit_tb(0);
3919 dc->is_br = 1;
3920 break;
3921 case 1: // htstate
3922 // XXX gen_op_wrhtstate();
3923 break;
3924 case 3: // hintp
3925 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3926 break;
3927 case 5: // htba
3928 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3929 break;
3930 case 31: // hstick_cmpr
3932 TCGv_ptr r_tickptr;
3934 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3935 r_tickptr = tcg_temp_new_ptr();
3936 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3937 offsetof(CPUSPARCState, hstick));
3938 gen_helper_tick_set_limit(r_tickptr,
3939 cpu_hstick_cmpr);
3940 tcg_temp_free_ptr(r_tickptr);
3942 break;
3943 case 6: // hver readonly
3944 default:
3945 goto illegal_insn;
3947 #endif
3949 break;
3950 #endif
3951 #ifdef TARGET_SPARC64
3952 case 0x2c: /* V9 movcc */
3954 int cc = GET_FIELD_SP(insn, 11, 12);
3955 int cond = GET_FIELD_SP(insn, 14, 17);
3956 DisasCompare cmp;
3957 TCGv dst;
3959 if (insn & (1 << 18)) {
3960 if (cc == 0) {
3961 gen_compare(&cmp, 0, cond, dc);
3962 } else if (cc == 2) {
3963 gen_compare(&cmp, 1, cond, dc);
3964 } else {
3965 goto illegal_insn;
3967 } else {
3968 gen_fcompare(&cmp, cc, cond);
3971 /* The get_src2 above loaded the normal 13-bit
3972 immediate field, not the 11-bit field we have
3973 in movcc. But it did handle the reg case. */
3974 if (IS_IMM) {
3975 simm = GET_FIELD_SPs(insn, 0, 10);
3976 tcg_gen_movi_tl(cpu_src2, simm);
3979 dst = gen_load_gpr(dc, rd);
3980 tcg_gen_movcond_tl(cmp.cond, dst,
3981 cmp.c1, cmp.c2,
3982 cpu_src2, dst);
3983 free_compare(&cmp);
3984 gen_store_gpr(dc, rd, dst);
3985 break;
3987 case 0x2d: /* V9 sdivx */
3988 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3989 gen_store_gpr(dc, rd, cpu_dst);
3990 break;
3991 case 0x2e: /* V9 popc */
3992 gen_helper_popc(cpu_dst, cpu_src2);
3993 gen_store_gpr(dc, rd, cpu_dst);
3994 break;
3995 case 0x2f: /* V9 movr */
3997 int cond = GET_FIELD_SP(insn, 10, 12);
3998 DisasCompare cmp;
3999 TCGv dst;
4001 gen_compare_reg(&cmp, cond, cpu_src1);
4003 /* The get_src2 above loaded the normal 13-bit
4004 immediate field, not the 10-bit field we have
4005 in movr. But it did handle the reg case. */
4006 if (IS_IMM) {
4007 simm = GET_FIELD_SPs(insn, 0, 9);
4008 tcg_gen_movi_tl(cpu_src2, simm);
4011 dst = gen_load_gpr(dc, rd);
4012 tcg_gen_movcond_tl(cmp.cond, dst,
4013 cmp.c1, cmp.c2,
4014 cpu_src2, dst);
4015 free_compare(&cmp);
4016 gen_store_gpr(dc, rd, dst);
4017 break;
4019 #endif
4020 default:
4021 goto illegal_insn;
4024 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4025 #ifdef TARGET_SPARC64
4026 int opf = GET_FIELD_SP(insn, 5, 13);
4027 rs1 = GET_FIELD(insn, 13, 17);
4028 rs2 = GET_FIELD(insn, 27, 31);
4029 if (gen_trap_ifnofpu(dc)) {
4030 goto jmp_insn;
4033 switch (opf) {
4034 case 0x000: /* VIS I edge8cc */
4035 CHECK_FPU_FEATURE(dc, VIS1);
4036 cpu_src1 = gen_load_gpr(dc, rs1);
4037 cpu_src2 = gen_load_gpr(dc, rs2);
4038 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4039 gen_store_gpr(dc, rd, cpu_dst);
4040 break;
4041 case 0x001: /* VIS II edge8n */
4042 CHECK_FPU_FEATURE(dc, VIS2);
4043 cpu_src1 = gen_load_gpr(dc, rs1);
4044 cpu_src2 = gen_load_gpr(dc, rs2);
4045 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4046 gen_store_gpr(dc, rd, cpu_dst);
4047 break;
4048 case 0x002: /* VIS I edge8lcc */
4049 CHECK_FPU_FEATURE(dc, VIS1);
4050 cpu_src1 = gen_load_gpr(dc, rs1);
4051 cpu_src2 = gen_load_gpr(dc, rs2);
4052 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4053 gen_store_gpr(dc, rd, cpu_dst);
4054 break;
4055 case 0x003: /* VIS II edge8ln */
4056 CHECK_FPU_FEATURE(dc, VIS2);
4057 cpu_src1 = gen_load_gpr(dc, rs1);
4058 cpu_src2 = gen_load_gpr(dc, rs2);
4059 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4060 gen_store_gpr(dc, rd, cpu_dst);
4061 break;
4062 case 0x004: /* VIS I edge16cc */
4063 CHECK_FPU_FEATURE(dc, VIS1);
4064 cpu_src1 = gen_load_gpr(dc, rs1);
4065 cpu_src2 = gen_load_gpr(dc, rs2);
4066 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4067 gen_store_gpr(dc, rd, cpu_dst);
4068 break;
4069 case 0x005: /* VIS II edge16n */
4070 CHECK_FPU_FEATURE(dc, VIS2);
4071 cpu_src1 = gen_load_gpr(dc, rs1);
4072 cpu_src2 = gen_load_gpr(dc, rs2);
4073 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4074 gen_store_gpr(dc, rd, cpu_dst);
4075 break;
4076 case 0x006: /* VIS I edge16lcc */
4077 CHECK_FPU_FEATURE(dc, VIS1);
4078 cpu_src1 = gen_load_gpr(dc, rs1);
4079 cpu_src2 = gen_load_gpr(dc, rs2);
4080 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4081 gen_store_gpr(dc, rd, cpu_dst);
4082 break;
4083 case 0x007: /* VIS II edge16ln */
4084 CHECK_FPU_FEATURE(dc, VIS2);
4085 cpu_src1 = gen_load_gpr(dc, rs1);
4086 cpu_src2 = gen_load_gpr(dc, rs2);
4087 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4088 gen_store_gpr(dc, rd, cpu_dst);
4089 break;
4090 case 0x008: /* VIS I edge32cc */
4091 CHECK_FPU_FEATURE(dc, VIS1);
4092 cpu_src1 = gen_load_gpr(dc, rs1);
4093 cpu_src2 = gen_load_gpr(dc, rs2);
4094 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4095 gen_store_gpr(dc, rd, cpu_dst);
4096 break;
4097 case 0x009: /* VIS II edge32n */
4098 CHECK_FPU_FEATURE(dc, VIS2);
4099 cpu_src1 = gen_load_gpr(dc, rs1);
4100 cpu_src2 = gen_load_gpr(dc, rs2);
4101 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4102 gen_store_gpr(dc, rd, cpu_dst);
4103 break;
4104 case 0x00a: /* VIS I edge32lcc */
4105 CHECK_FPU_FEATURE(dc, VIS1);
4106 cpu_src1 = gen_load_gpr(dc, rs1);
4107 cpu_src2 = gen_load_gpr(dc, rs2);
4108 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4109 gen_store_gpr(dc, rd, cpu_dst);
4110 break;
4111 case 0x00b: /* VIS II edge32ln */
4112 CHECK_FPU_FEATURE(dc, VIS2);
4113 cpu_src1 = gen_load_gpr(dc, rs1);
4114 cpu_src2 = gen_load_gpr(dc, rs2);
4115 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4116 gen_store_gpr(dc, rd, cpu_dst);
4117 break;
4118 case 0x010: /* VIS I array8 */
4119 CHECK_FPU_FEATURE(dc, VIS1);
4120 cpu_src1 = gen_load_gpr(dc, rs1);
4121 cpu_src2 = gen_load_gpr(dc, rs2);
4122 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4123 gen_store_gpr(dc, rd, cpu_dst);
4124 break;
4125 case 0x012: /* VIS I array16 */
4126 CHECK_FPU_FEATURE(dc, VIS1);
4127 cpu_src1 = gen_load_gpr(dc, rs1);
4128 cpu_src2 = gen_load_gpr(dc, rs2);
4129 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4130 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4131 gen_store_gpr(dc, rd, cpu_dst);
4132 break;
4133 case 0x014: /* VIS I array32 */
4134 CHECK_FPU_FEATURE(dc, VIS1);
4135 cpu_src1 = gen_load_gpr(dc, rs1);
4136 cpu_src2 = gen_load_gpr(dc, rs2);
4137 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4138 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4139 gen_store_gpr(dc, rd, cpu_dst);
4140 break;
4141 case 0x018: /* VIS I alignaddr */
4142 CHECK_FPU_FEATURE(dc, VIS1);
4143 cpu_src1 = gen_load_gpr(dc, rs1);
4144 cpu_src2 = gen_load_gpr(dc, rs2);
4145 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4146 gen_store_gpr(dc, rd, cpu_dst);
4147 break;
4148 case 0x01a: /* VIS I alignaddrl */
4149 CHECK_FPU_FEATURE(dc, VIS1);
4150 cpu_src1 = gen_load_gpr(dc, rs1);
4151 cpu_src2 = gen_load_gpr(dc, rs2);
4152 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4153 gen_store_gpr(dc, rd, cpu_dst);
4154 break;
4155 case 0x019: /* VIS II bmask */
4156 CHECK_FPU_FEATURE(dc, VIS2);
4157 cpu_src1 = gen_load_gpr(dc, rs1);
4158 cpu_src2 = gen_load_gpr(dc, rs2);
4159 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4160 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
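/* Note (editor's addition, assuming VIS II semantics): the deposit stores
   the 32-bit sum into GSR.mask (bits 63:32), which a subsequent bshuffle
   uses to select its byte permutation. */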
4161 gen_store_gpr(dc, rd, cpu_dst);
4162 break;
4163 case 0x020: /* VIS I fcmple16 */
4164 CHECK_FPU_FEATURE(dc, VIS1);
4165 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4166 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4167 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4168 gen_store_gpr(dc, rd, cpu_dst);
4169 break;
4170 case 0x022: /* VIS I fcmpne16 */
4171 CHECK_FPU_FEATURE(dc, VIS1);
4172 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4173 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4174 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4175 gen_store_gpr(dc, rd, cpu_dst);
4176 break;
4177 case 0x024: /* VIS I fcmple32 */
4178 CHECK_FPU_FEATURE(dc, VIS1);
4179 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4180 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4181 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4182 gen_store_gpr(dc, rd, cpu_dst);
4183 break;
4184 case 0x026: /* VIS I fcmpne32 */
4185 CHECK_FPU_FEATURE(dc, VIS1);
4186 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4187 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4188 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4189 gen_store_gpr(dc, rd, cpu_dst);
4190 break;
4191 case 0x028: /* VIS I fcmpgt16 */
4192 CHECK_FPU_FEATURE(dc, VIS1);
4193 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4194 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4195 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4196 gen_store_gpr(dc, rd, cpu_dst);
4197 break;
4198 case 0x02a: /* VIS I fcmpeq16 */
4199 CHECK_FPU_FEATURE(dc, VIS1);
4200 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4201 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4202 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4203 gen_store_gpr(dc, rd, cpu_dst);
4204 break;
4205 case 0x02c: /* VIS I fcmpgt32 */
4206 CHECK_FPU_FEATURE(dc, VIS1);
4207 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4208 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4209 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4210 gen_store_gpr(dc, rd, cpu_dst);
4211 break;
4212 case 0x02e: /* VIS I fcmpeq32 */
4213 CHECK_FPU_FEATURE(dc, VIS1);
4214 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4215 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4216 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4217 gen_store_gpr(dc, rd, cpu_dst);
4218 break;
4219 case 0x031: /* VIS I fmul8x16 */
4220 CHECK_FPU_FEATURE(dc, VIS1);
4221 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4222 break;
4223 case 0x033: /* VIS I fmul8x16au */
4224 CHECK_FPU_FEATURE(dc, VIS1);
4225 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4226 break;
4227 case 0x035: /* VIS I fmul8x16al */
4228 CHECK_FPU_FEATURE(dc, VIS1);
4229 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4230 break;
4231 case 0x036: /* VIS I fmul8sux16 */
4232 CHECK_FPU_FEATURE(dc, VIS1);
4233 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4234 break;
4235 case 0x037: /* VIS I fmul8ulx16 */
4236 CHECK_FPU_FEATURE(dc, VIS1);
4237 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4238 break;
4239 case 0x038: /* VIS I fmuld8sux16 */
4240 CHECK_FPU_FEATURE(dc, VIS1);
4241 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4242 break;
4243 case 0x039: /* VIS I fmuld8ulx16 */
4244 CHECK_FPU_FEATURE(dc, VIS1);
4245 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4246 break;
4247 case 0x03a: /* VIS I fpack32 */
4248 CHECK_FPU_FEATURE(dc, VIS1);
4249 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4250 break;
4251 case 0x03b: /* VIS I fpack16 */
4252 CHECK_FPU_FEATURE(dc, VIS1);
4253 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4254 cpu_dst_32 = gen_dest_fpr_F(dc);
4255 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4256 gen_store_fpr_F(dc, rd, cpu_dst_32);
4257 break;
4258 case 0x03d: /* VIS I fpackfix */
4259 CHECK_FPU_FEATURE(dc, VIS1);
4260 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4261 cpu_dst_32 = gen_dest_fpr_F(dc);
4262 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4263 gen_store_fpr_F(dc, rd, cpu_dst_32);
4264 break;
4265 case 0x03e: /* VIS I pdist */
4266 CHECK_FPU_FEATURE(dc, VIS1);
4267 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4268 break;
4269 case 0x048: /* VIS I faligndata */
4270 CHECK_FPU_FEATURE(dc, VIS1);
4271 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4272 break;
4273 case 0x04b: /* VIS I fpmerge */
4274 CHECK_FPU_FEATURE(dc, VIS1);
4275 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4276 break;
4277 case 0x04c: /* VIS II bshuffle */
4278 CHECK_FPU_FEATURE(dc, VIS2);
4279 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4280 break;
4281 case 0x04d: /* VIS I fexpand */
4282 CHECK_FPU_FEATURE(dc, VIS1);
4283 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4284 break;
4285 case 0x050: /* VIS I fpadd16 */
4286 CHECK_FPU_FEATURE(dc, VIS1);
4287 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4288 break;
4289 case 0x051: /* VIS I fpadd16s */
4290 CHECK_FPU_FEATURE(dc, VIS1);
4291 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4292 break;
4293 case 0x052: /* VIS I fpadd32 */
4294 CHECK_FPU_FEATURE(dc, VIS1);
4295 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4296 break;
4297 case 0x053: /* VIS I fpadd32s */
4298 CHECK_FPU_FEATURE(dc, VIS1);
4299 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4300 break;
4301 case 0x054: /* VIS I fpsub16 */
4302 CHECK_FPU_FEATURE(dc, VIS1);
4303 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4304 break;
4305 case 0x055: /* VIS I fpsub16s */
4306 CHECK_FPU_FEATURE(dc, VIS1);
4307 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4308 break;
4309 case 0x056: /* VIS I fpsub32 */
4310 CHECK_FPU_FEATURE(dc, VIS1);
4311 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4312 break;
4313 case 0x057: /* VIS I fpsub32s */
4314 CHECK_FPU_FEATURE(dc, VIS1);
4315 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4316 break;
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                break;
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                break;
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                break;
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                break;
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                break;
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                break;
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                break;
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                break;
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                break;
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                break;
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                break;
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                break;
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                break;
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                break;
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                break;
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                break;
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                break;
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                break;
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                break;
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                break;
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                break;
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                break;
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                break;
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                break;
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
                // XXX
                goto illegal_insn;
            default:
                goto illegal_insn;
            }
#else
            goto ncp_insn;
#endif
        } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
            goto illegal_insn;
#else
            goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            TCGv_i32 r_const;

            save_state(dc);
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            gen_helper_restore(cpu_env);
            gen_mov_pc_npc(dc);
            r_const = tcg_const_i32(3);
            gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
            tcg_temp_free_i32(r_const);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC;
            goto jmp_insn;
#endif
        } else {
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            switch (xop) {
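            /* jmpl and friends update cpu_npc rather than cpu_pc: the
               delay-slot instruction at the old npc still executes before
               the transfer takes effect.  Setting dc->npc = DYNAMIC_PC
               marks npc as known only at run time from here on. */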
            case 0x38:  /* jmpl */
                {
                    TCGv t;
                    TCGv_i32 r_const;

                    t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, dc->pc);
                    gen_store_gpr(dc, rd, t);
                    gen_mov_pc_npc(dc);
                    r_const = tcg_const_i32(3);
                    gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_address_mask(dc, cpu_tmp0);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                }
                goto jmp_insn;
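            /* rett is sparc32 only: return from trap.  Privileged; the
               helper is expected to restore the pre-trap supervisor state
               and re-enable traps (V8 PSR: S <- PS, ET <- 1). */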
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:  /* rett, V9 return */
                {
                    TCGv_i32 r_const;

                    if (!supervisor(dc))
                        goto priv_insn;
                    gen_mov_pc_npc(dc);
                    r_const = tcg_const_i32(3);
                    gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(r_const);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    gen_helper_rett(cpu_env);
                }
                goto jmp_insn;
#endif
            case 0x3b: /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                    goto unimp_flush;
                /* nop */
                break;
            case 0x3c:  /* save */
                save_state(dc);
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d:  /* restore */
                save_state(dc);
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:  /* V9 done/retry */
                {
                    switch (rd) {
                    case 0:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        gen_helper_done(cpu_env);
                        goto jmp_insn;
                    case 1:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        gen_helper_retry(cpu_env);
                        goto jmp_insn;
                    default:
                        goto illegal_insn;
                    }
                }
                break;
#endif
            default:
                goto illegal_insn;
            }
        }
        break;
    }
    break;
    case 3:     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);
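                /* Integer loads: cpu_val is the destination temporary and
                   is written back to %rd by gen_store_gpr() after the
                   switch, unless a case jumps to skip_move.  Note that
                   gen_address_mask() truncates the address to 32 bits when
                   the V9 address mask (PSTATE.AM) is in effect. */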
                switch (xop) {
                case 0x0:   /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:   /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:   /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:   /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9:   /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:   /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:   /* ldstub -- XXX: should be atomic */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory -- XXX: should be
                       atomic */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
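                /* Alternate-space (ASI) loads: on sparc32 these are
                   privileged and the immediate form is illegal, so the
                   checks below apply; on sparc64 the ASI comes from the
                   instruction or the %asi register instead. */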
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:  /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11:  /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:  /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:  /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:  /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:  /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:  /* ldstuba -- XXX: should be atomic */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:  /* swapa, swap reg with alt. memory -- XXX:
                               should be atomic */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;

#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
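                /* 64-bit-only load forms.  The prefetch cases are decoded
                   but intentionally generate no code. */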
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
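            /* FP loads (xop 0x20-0x23) are handled below.  ldfsr with
               rd == 1 is the V9 ldxfsr encoding, which loads the whole
               64-bit FSR at once. */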
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:  /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:  /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:  /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:  /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
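            /* Integer stores mirror the loads above; std requires an even
               rd and stores the register pair as one 64-bit access. */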
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;
                        TCGv lo;

                        save_state(dc);
                        gen_address_mask(dc, cpu_addr);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        lo = gen_load_gpr(dc, rd + 1);

                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 4);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 1);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 2);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        save_state(dc);
                        gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    }
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 8);
                    dc->npc = DYNAMIC_PC;
                    break;
#endif
                default:
                    goto illegal_insn;
                }
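            /* FP stores (xop 0x24-0x27).  stfsr with rd == 1 is the V9
               stxfsr encoding; stdfq exists only on sparc32 and ends up
               at nfq_insn, which raises a sequence-error FP exception. */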
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    {
                        TCGv t = get_temp_tl(dc);
                        gen_address_mask(dc, cpu_addr);
                        cpu_src1_32 = gen_load_fpr_F(dc, rd);
                        tcg_gen_ext_i32_tl(t, cpu_src1_32);
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
                        TCGv t = get_temp_tl(dc);

                        tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rd));
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_stqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                    break;
                default:
                    goto illegal_insn;
                }
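            /* ASI FP stores plus compare-and-swap.  casa is nominally V9,
               but LEON3 implements it too and allows ASI 0xa from user
               space, hence the relaxed privilege check below. */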
            } else if (xop > 0x33 && xop < 0x3f) {
                save_state(dc);
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        r_const = tcg_const_i32(7);
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    /* LEON3 allows CASA from user space with ASI 0xa */
                    if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
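    /* Shared exits: each label below raises the corresponding trap and
       marks the TB as ended; egress then frees any temporaries allocated
       while translating this instruction. */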
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
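/* Translate one guest TB starting at tb->pc.  Translation stops at the
   first branch, at a page boundary (so the PC of a TT_TFAULT is always in
   the faulting page), after one insn when single-stepping, or when the
   TCG op buffer or the insn budget is exhausted. */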
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env, false);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        if (dc->npc & JUMP_PC) {
            assert(dc->jump_pc[1] == dc->pc + 4);
            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
        } else {
            tcg_gen_insn_start(dc->pc, dc->npc);
        }
        num_insns++;
        last_pc = dc->pc;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
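/* One-time creation of the TCG globals declared at the top of this file.
   Each is backed by a CPUSPARCState field; the windowed registers
   %o0-%i7 instead live relative to regwptr, which tracks the current
   register window. */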
void gen_intermediate_code_init(CPUSPARCState *env)
{
    static int inited;
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_asi, offsetof(CPUSPARCState, asi), "asi" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_softint, offsetof(CPUSPARCState, softint), "softint" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    /* init various static tables */
    if (inited) {
        return;
    }
    inited = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }

    TCGV_UNUSED(cpu_regs[0]);
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
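/* Called when unwinding after an exception: rebuild pc/npc from the
   (pc, npc) pair recorded by tcg_gen_insn_start.  For a conditional
   delayed branch the npc slot holds jump_pc[0] | JUMP_PC, and env->cond
   selects between the branch target and the fall-through. */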
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;