target-sparc: Store %asi in TB flags
1 /*
2 SPARC translation
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "exec/cpu_ldst.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
33 #include "exec/log.h"
36 #define DEBUG_DISAS
38 #define DYNAMIC_PC 1 /* dynamic pc value */
39 #define JUMP_PC 2 /* dynamic pc value which takes only two values
40 according to jump_pc[T2] */
42 /* global register indexes */
43 static TCGv_env cpu_env;
44 static TCGv_ptr cpu_regwptr;
45 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
46 static TCGv_i32 cpu_cc_op;
47 static TCGv_i32 cpu_psr;
48 static TCGv cpu_fsr, cpu_pc, cpu_npc;
49 static TCGv cpu_regs[32];
50 static TCGv cpu_y;
51 #ifndef CONFIG_USER_ONLY
52 static TCGv cpu_tbr;
53 #endif
54 static TCGv cpu_cond;
55 #ifdef TARGET_SPARC64
56 static TCGv_i32 cpu_xcc, cpu_fprs;
57 static TCGv cpu_gsr;
58 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
59 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
60 #else
61 static TCGv cpu_wim;
62 #endif
63 /* Floating point registers */
64 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
66 #include "exec/gen-icount.h"
68 typedef struct DisasContext {
69 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
70 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
71 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
72 int is_br;
73 int mem_idx;
74 int fpu_enabled;
75 int address_mask_32bit;
76 int singlestep;
77 uint32_t cc_op; /* current CC operation */
78 struct TranslationBlock *tb;
79 sparc_def_t *def;
80 TCGv_i32 t32[3];
81 TCGv ttl[5];
82 int n_t32;
83 int n_ttl;
84 #ifdef TARGET_SPARC64
85 int asi;
86 #endif
87 } DisasContext;
89 typedef struct {
90 TCGCond cond;
91 bool is_bool;
92 bool g1, g2;
93 TCGv c1, c2;
94 } DisasCompare;
96 // This macro uses non-native bit order
97 #define GET_FIELD(X, FROM, TO) \
98 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
100 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
101 #define GET_FIELD_SP(X, FROM, TO) \
102 GET_FIELD(X, 31 - (TO), 31 - (FROM))
104 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
105 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
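/* Example: GET_FIELD numbers bits from the MSB, so GET_FIELD(insn, 0, 1)
   is (insn >> 30) & 3, the two-bit "op" field of every SPARC instruction,
   while GET_FIELD_SP(insn, 0, 1), counting from bit 0, is insn & 3. */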
107 #ifdef TARGET_SPARC64
108 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
109 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
110 #else
111 #define DFPREG(r) (r & 0x1e)
112 #define QFPREG(r) (r & 0x1c)
113 #endif
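/* Example: on SPARC64 the 5-bit register field folds bit 0 into bit 5, so
   an encoded double-register field of 1 names %f32 (DFPREG(1) == 32),
   while a field of 2 names %f2 (DFPREG(2) == 2). */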
115 #define UA2005_HTRAP_MASK 0xff
116 #define V8_TRAP_MASK 0x7f
118 static int sign_extend(int x, int len)
120 len = 32 - len;
121 return (x << len) >> len;
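/* Example: sign_extend(0x1fff, 13) shifts the 13-bit value to the top of
   the 32-bit word and arithmetic-shifts it back down, yielding -1. */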
124 #define IS_IMM (insn & (1<<13))
126 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
128 TCGv_i32 t;
129 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
130 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
131 return t;
134 static inline TCGv get_temp_tl(DisasContext *dc)
136 TCGv t;
137 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
138 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
139 return t;
142 static inline void gen_update_fprs_dirty(int rd)
144 #if defined(TARGET_SPARC64)
145 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
146 #endif
149 /* floating point registers moves */
150 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
152 #if TCG_TARGET_REG_BITS == 32
153 if (src & 1) {
154 return TCGV_LOW(cpu_fpr[src / 2]);
155 } else {
156 return TCGV_HIGH(cpu_fpr[src / 2]);
158 #else
159 if (src & 1) {
160 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
161 } else {
162 TCGv_i32 ret = get_temp_i32(dc);
163 TCGv_i64 t = tcg_temp_new_i64();
165 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
166 tcg_gen_extrl_i64_i32(ret, t);
167 tcg_temp_free_i64(t);
169 return ret;
171 #endif
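/* Each element of cpu_fpr[] packs two 32-bit float registers into one
   64-bit value with the even-numbered register in the high half, which is
   why an odd 'src' selects the low word above. */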
174 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
176 #if TCG_TARGET_REG_BITS == 32
177 if (dst & 1) {
178 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
179 } else {
180 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
182 #else
183 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
184 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
185 (dst & 1 ? 0 : 32), 32);
186 #endif
187 gen_update_fprs_dirty(dst);
190 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
192 return get_temp_i32(dc);
195 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
197 src = DFPREG(src);
198 return cpu_fpr[src / 2];
201 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
203 dst = DFPREG(dst);
204 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
205 gen_update_fprs_dirty(dst);
208 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
210 return cpu_fpr[DFPREG(dst) / 2];
213 static void gen_op_load_fpr_QT0(unsigned int src)
215 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
216 offsetof(CPU_QuadU, ll.upper));
217 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
218 offsetof(CPU_QuadU, ll.lower));
221 static void gen_op_load_fpr_QT1(unsigned int src)
223 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
224 offsetof(CPU_QuadU, ll.upper));
225 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
226 offsetof(CPU_QuadU, ll.lower));
229 static void gen_op_store_QT0_fpr(unsigned int dst)
231 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
232 offsetof(CPU_QuadU, ll.upper));
233 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
234 offsetof(CPU_QuadU, ll.lower));
237 #ifdef TARGET_SPARC64
238 static void gen_move_Q(unsigned int rd, unsigned int rs)
240 rd = QFPREG(rd);
241 rs = QFPREG(rs);
243 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
244 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
245 gen_update_fprs_dirty(rd);
247 #endif
249 /* moves */
250 #ifdef CONFIG_USER_ONLY
251 #define supervisor(dc) 0
252 #ifdef TARGET_SPARC64
253 #define hypervisor(dc) 0
254 #endif
255 #else
256 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
257 #ifdef TARGET_SPARC64
258 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
259 #else
260 #endif
261 #endif
263 #ifdef TARGET_SPARC64
264 #ifndef TARGET_ABI32
265 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
266 #else
267 #define AM_CHECK(dc) (1)
268 #endif
269 #endif
271 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
273 #ifdef TARGET_SPARC64
274 if (AM_CHECK(dc))
275 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
276 #endif
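/* When the v9 address-mask bit is in effect (AM_CHECK), 64-bit addresses
   are truncated to their low 32 bits, e.g. 0xffffffff12345678 becomes
   0x12345678 before the access. */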
279 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
281 if (reg > 0) {
282 assert(reg < 32);
283 return cpu_regs[reg];
284 } else {
285 TCGv t = get_temp_tl(dc);
286 tcg_gen_movi_tl(t, 0);
287 return t;
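/* %g0 always reads as zero, so register 0 is materialized as a fresh
   temporary holding 0 rather than as a global TCG value. */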
291 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
293 if (reg > 0) {
294 assert(reg < 32);
295 tcg_gen_mov_tl(cpu_regs[reg], v);
299 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
301 if (reg > 0) {
302 assert(reg < 32);
303 return cpu_regs[reg];
304 } else {
305 return get_temp_tl(dc);
309 static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
310 target_ulong npc)
312 if (unlikely(s->singlestep)) {
313 return false;
316 #ifndef CONFIG_USER_ONLY
317 return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
318 (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
319 #else
320 return true;
321 #endif
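/* Direct TB chaining is only safe when both pc and npc stay on the same
   guest page as this TB, so that page invalidation can unlink the jump;
   otherwise we must exit to the main loop. */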
324 static inline void gen_goto_tb(DisasContext *s, int tb_num,
325 target_ulong pc, target_ulong npc)
327 if (use_goto_tb(s, pc, npc)) {
328 /* jump to same page: we can use a direct jump */
329 tcg_gen_goto_tb(tb_num);
330 tcg_gen_movi_tl(cpu_pc, pc);
331 tcg_gen_movi_tl(cpu_npc, npc);
332 tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
333 } else {
334 /* jump to another page: currently not optimized */
335 tcg_gen_movi_tl(cpu_pc, pc);
336 tcg_gen_movi_tl(cpu_npc, npc);
337 tcg_gen_exit_tb(0);
341 // XXX suboptimal
342 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
344 tcg_gen_extu_i32_tl(reg, src);
345 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
346 tcg_gen_andi_tl(reg, reg, 0x1);
349 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
351 tcg_gen_extu_i32_tl(reg, src);
352 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
353 tcg_gen_andi_tl(reg, reg, 0x1);
356 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
358 tcg_gen_extu_i32_tl(reg, src);
359 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
360 tcg_gen_andi_tl(reg, reg, 0x1);
363 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
365 tcg_gen_extu_i32_tl(reg, src);
366 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
367 tcg_gen_andi_tl(reg, reg, 0x1);
370 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
372 tcg_gen_mov_tl(cpu_cc_src, src1);
373 tcg_gen_mov_tl(cpu_cc_src2, src2);
374 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
375 tcg_gen_mov_tl(dst, cpu_cc_dst);
378 static TCGv_i32 gen_add32_carry32(void)
380 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
382 /* Carry is computed from a previous add: (dst < src) */
383 #if TARGET_LONG_BITS == 64
384 cc_src1_32 = tcg_temp_new_i32();
385 cc_src2_32 = tcg_temp_new_i32();
386 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
387 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
388 #else
389 cc_src1_32 = cpu_cc_dst;
390 cc_src2_32 = cpu_cc_src;
391 #endif
393 carry_32 = tcg_temp_new_i32();
394 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
396 #if TARGET_LONG_BITS == 64
397 tcg_temp_free_i32(cc_src1_32);
398 tcg_temp_free_i32(cc_src2_32);
399 #endif
401 return carry_32;
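/* Example: for the 32-bit add 0xffffffff + 1 the truncated result is 0,
   and 0 < 0xffffffff unsigned, so the setcond above yields carry = 1. */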
404 static TCGv_i32 gen_sub32_carry32(void)
406 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
408 /* Carry is computed from a previous borrow: (src1 < src2) */
409 #if TARGET_LONG_BITS == 64
410 cc_src1_32 = tcg_temp_new_i32();
411 cc_src2_32 = tcg_temp_new_i32();
412 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
413 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
414 #else
415 cc_src1_32 = cpu_cc_src;
416 cc_src2_32 = cpu_cc_src2;
417 #endif
419 carry_32 = tcg_temp_new_i32();
420 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
422 #if TARGET_LONG_BITS == 64
423 tcg_temp_free_i32(cc_src1_32);
424 tcg_temp_free_i32(cc_src2_32);
425 #endif
427 return carry_32;
430 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
431 TCGv src2, int update_cc)
433 TCGv_i32 carry_32;
434 TCGv carry;
436 switch (dc->cc_op) {
437 case CC_OP_DIV:
438 case CC_OP_LOGIC:
439 /* Carry is known to be zero. Fall back to plain ADD. */
440 if (update_cc) {
441 gen_op_add_cc(dst, src1, src2);
442 } else {
443 tcg_gen_add_tl(dst, src1, src2);
445 return;
447 case CC_OP_ADD:
448 case CC_OP_TADD:
449 case CC_OP_TADDTV:
450 if (TARGET_LONG_BITS == 32) {
451 /* We can re-use the host's hardware carry generation by using
452 an ADD2 opcode. We discard the low part of the output.
453 Ideally we'd combine this operation with the add that
454 generated the carry in the first place. */
455 carry = tcg_temp_new();
456 tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
457 tcg_temp_free(carry);
458 goto add_done;
460 carry_32 = gen_add32_carry32();
461 break;
463 case CC_OP_SUB:
464 case CC_OP_TSUB:
465 case CC_OP_TSUBTV:
466 carry_32 = gen_sub32_carry32();
467 break;
469 default:
470 /* We need external help to produce the carry. */
471 carry_32 = tcg_temp_new_i32();
472 gen_helper_compute_C_icc(carry_32, cpu_env);
473 break;
476 #if TARGET_LONG_BITS == 64
477 carry = tcg_temp_new();
478 tcg_gen_extu_i32_i64(carry, carry_32);
479 #else
480 carry = carry_32;
481 #endif
483 tcg_gen_add_tl(dst, src1, src2);
484 tcg_gen_add_tl(dst, dst, carry);
486 tcg_temp_free_i32(carry_32);
487 #if TARGET_LONG_BITS == 64
488 tcg_temp_free(carry);
489 #endif
491 add_done:
492 if (update_cc) {
493 tcg_gen_mov_tl(cpu_cc_src, src1);
494 tcg_gen_mov_tl(cpu_cc_src2, src2);
495 tcg_gen_mov_tl(cpu_cc_dst, dst);
496 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
497 dc->cc_op = CC_OP_ADDX;
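/* ADDX computes dst = src1 + src2 + C; the switch above only chooses the
   cheapest way to recover C from the last cc-setting operation. */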
501 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
503 tcg_gen_mov_tl(cpu_cc_src, src1);
504 tcg_gen_mov_tl(cpu_cc_src2, src2);
505 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
506 tcg_gen_mov_tl(dst, cpu_cc_dst);
509 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
510 TCGv src2, int update_cc)
512 TCGv_i32 carry_32;
513 TCGv carry;
515 switch (dc->cc_op) {
516 case CC_OP_DIV:
517 case CC_OP_LOGIC:
518 /* Carry is known to be zero. Fall back to plain SUB. */
519 if (update_cc) {
520 gen_op_sub_cc(dst, src1, src2);
521 } else {
522 tcg_gen_sub_tl(dst, src1, src2);
524 return;
526 case CC_OP_ADD:
527 case CC_OP_TADD:
528 case CC_OP_TADDTV:
529 carry_32 = gen_add32_carry32();
530 break;
532 case CC_OP_SUB:
533 case CC_OP_TSUB:
534 case CC_OP_TSUBTV:
535 if (TARGET_LONG_BITS == 32) {
536 /* We can re-use the host's hardware carry generation by using
537 a SUB2 opcode. We discard the low part of the output.
538 Ideally we'd combine this operation with the sub that
539 generated the borrow in the first place. */
540 carry = tcg_temp_new();
541 tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
542 tcg_temp_free(carry);
543 goto sub_done;
545 carry_32 = gen_sub32_carry32();
546 break;
548 default:
549 /* We need external help to produce the carry. */
550 carry_32 = tcg_temp_new_i32();
551 gen_helper_compute_C_icc(carry_32, cpu_env);
552 break;
555 #if TARGET_LONG_BITS == 64
556 carry = tcg_temp_new();
557 tcg_gen_extu_i32_i64(carry, carry_32);
558 #else
559 carry = carry_32;
560 #endif
562 tcg_gen_sub_tl(dst, src1, src2);
563 tcg_gen_sub_tl(dst, dst, carry);
565 tcg_temp_free_i32(carry_32);
566 #if TARGET_LONG_BITS == 64
567 tcg_temp_free(carry);
568 #endif
570 sub_done:
571 if (update_cc) {
572 tcg_gen_mov_tl(cpu_cc_src, src1);
573 tcg_gen_mov_tl(cpu_cc_src2, src2);
574 tcg_gen_mov_tl(cpu_cc_dst, dst);
575 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
576 dc->cc_op = CC_OP_SUBX;
580 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
582 TCGv r_temp, zero, t0;
584 r_temp = tcg_temp_new();
585 t0 = tcg_temp_new();
587 /* old op:
588 if (!(env->y & 1))
589 T1 = 0;
590 */
591 zero = tcg_const_tl(0);
592 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
593 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
594 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
595 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
596 zero, cpu_cc_src2);
597 tcg_temp_free(zero);
599 // b2 = T0 & 1;
600 // env->y = (b2 << 31) | (env->y >> 1);
601 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
602 tcg_gen_shli_tl(r_temp, r_temp, 31);
603 tcg_gen_shri_tl(t0, cpu_y, 1);
604 tcg_gen_andi_tl(t0, t0, 0x7fffffff);
605 tcg_gen_or_tl(t0, t0, r_temp);
606 tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
608 // b1 = N ^ V;
609 gen_mov_reg_N(t0, cpu_psr);
610 gen_mov_reg_V(r_temp, cpu_psr);
611 tcg_gen_xor_tl(t0, t0, r_temp);
612 tcg_temp_free(r_temp);
614 // T0 = (b1 << 31) | (T0 >> 1);
615 // src1 = T0;
616 tcg_gen_shli_tl(t0, t0, 31);
617 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
618 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
619 tcg_temp_free(t0);
621 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
623 tcg_gen_mov_tl(dst, cpu_cc_dst);
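/* MULScc is one step of a 32x32 multiply: src2 is added in only when the
   low bit of %y is 1, %y shifts right with bit 0 of src1 entering at the
   top, and the partial sum in src1 shifts right with N ^ V entering at
   bit 31.  Thirty-two such steps compute the full product. */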
626 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
628 #if TARGET_LONG_BITS == 32
629 if (sign_ext) {
630 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
631 } else {
632 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
634 #else
635 TCGv t0 = tcg_temp_new_i64();
636 TCGv t1 = tcg_temp_new_i64();
638 if (sign_ext) {
639 tcg_gen_ext32s_i64(t0, src1);
640 tcg_gen_ext32s_i64(t1, src2);
641 } else {
642 tcg_gen_ext32u_i64(t0, src1);
643 tcg_gen_ext32u_i64(t1, src2);
646 tcg_gen_mul_i64(dst, t0, t1);
647 tcg_temp_free(t0);
648 tcg_temp_free(t1);
650 tcg_gen_shri_i64(cpu_y, dst, 32);
651 #endif
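/* Both UMUL and SMUL leave the high 32 bits of the 64-bit product in %y;
   e.g. umul of 0x80000000 by 2 gives a zero low word and %y == 1. */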
654 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
656 /* zero-extend truncated operands before multiplication */
657 gen_op_multiply(dst, src1, src2, 0);
660 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
662 /* sign-extend truncated operands before multiplication */
663 gen_op_multiply(dst, src1, src2, 1);
666 // 1
667 static inline void gen_op_eval_ba(TCGv dst)
669 tcg_gen_movi_tl(dst, 1);
672 // Z
673 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
675 gen_mov_reg_Z(dst, src);
678 // Z | (N ^ V)
679 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
681 TCGv t0 = tcg_temp_new();
682 gen_mov_reg_N(t0, src);
683 gen_mov_reg_V(dst, src);
684 tcg_gen_xor_tl(dst, dst, t0);
685 gen_mov_reg_Z(t0, src);
686 tcg_gen_or_tl(dst, dst, t0);
687 tcg_temp_free(t0);
690 // N ^ V
691 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
693 TCGv t0 = tcg_temp_new();
694 gen_mov_reg_V(t0, src);
695 gen_mov_reg_N(dst, src);
696 tcg_gen_xor_tl(dst, dst, t0);
697 tcg_temp_free(t0);
700 // C | Z
701 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
703 TCGv t0 = tcg_temp_new();
704 gen_mov_reg_Z(t0, src);
705 gen_mov_reg_C(dst, src);
706 tcg_gen_or_tl(dst, dst, t0);
707 tcg_temp_free(t0);
710 // C
711 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
713 gen_mov_reg_C(dst, src);
716 // V
717 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
719 gen_mov_reg_V(dst, src);
722 // 0
723 static inline void gen_op_eval_bn(TCGv dst)
725 tcg_gen_movi_tl(dst, 0);
728 // N
729 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
731 gen_mov_reg_N(dst, src);
734 // !Z
735 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
737 gen_mov_reg_Z(dst, src);
738 tcg_gen_xori_tl(dst, dst, 0x1);
741 // !(Z | (N ^ V))
742 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
744 gen_op_eval_ble(dst, src);
745 tcg_gen_xori_tl(dst, dst, 0x1);
748 // !(N ^ V)
749 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
751 gen_op_eval_bl(dst, src);
752 tcg_gen_xori_tl(dst, dst, 0x1);
755 // !(C | Z)
756 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
758 gen_op_eval_bleu(dst, src);
759 tcg_gen_xori_tl(dst, dst, 0x1);
762 // !C
763 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
765 gen_mov_reg_C(dst, src);
766 tcg_gen_xori_tl(dst, dst, 0x1);
769 // !N
770 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
772 gen_mov_reg_N(dst, src);
773 tcg_gen_xori_tl(dst, dst, 0x1);
776 // !V
777 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
779 gen_mov_reg_V(dst, src);
780 tcg_gen_xori_tl(dst, dst, 0x1);
783 /*
784 FPSR bit field FCC1 | FCC0:
785 0 =
786 1 <
787 2 >
788 3 unordered
789 */
790 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
791 unsigned int fcc_offset)
793 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
794 tcg_gen_andi_tl(reg, reg, 0x1);
797 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
798 unsigned int fcc_offset)
800 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
801 tcg_gen_andi_tl(reg, reg, 0x1);
804 // !0: FCC0 | FCC1
805 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
806 unsigned int fcc_offset)
808 TCGv t0 = tcg_temp_new();
809 gen_mov_reg_FCC0(dst, src, fcc_offset);
810 gen_mov_reg_FCC1(t0, src, fcc_offset);
811 tcg_gen_or_tl(dst, dst, t0);
812 tcg_temp_free(t0);
815 // 1 or 2: FCC0 ^ FCC1
816 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
817 unsigned int fcc_offset)
819 TCGv t0 = tcg_temp_new();
820 gen_mov_reg_FCC0(dst, src, fcc_offset);
821 gen_mov_reg_FCC1(t0, src, fcc_offset);
822 tcg_gen_xor_tl(dst, dst, t0);
823 tcg_temp_free(t0);
826 // 1 or 3: FCC0
827 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
828 unsigned int fcc_offset)
830 gen_mov_reg_FCC0(dst, src, fcc_offset);
833 // 1: FCC0 & !FCC1
834 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
835 unsigned int fcc_offset)
837 TCGv t0 = tcg_temp_new();
838 gen_mov_reg_FCC0(dst, src, fcc_offset);
839 gen_mov_reg_FCC1(t0, src, fcc_offset);
840 tcg_gen_andc_tl(dst, dst, t0);
841 tcg_temp_free(t0);
844 // 2 or 3: FCC1
845 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
846 unsigned int fcc_offset)
848 gen_mov_reg_FCC1(dst, src, fcc_offset);
851 // 2: !FCC0 & FCC1
852 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
853 unsigned int fcc_offset)
855 TCGv t0 = tcg_temp_new();
856 gen_mov_reg_FCC0(dst, src, fcc_offset);
857 gen_mov_reg_FCC1(t0, src, fcc_offset);
858 tcg_gen_andc_tl(dst, t0, dst);
859 tcg_temp_free(t0);
862 // 3: FCC0 & FCC1
863 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
864 unsigned int fcc_offset)
866 TCGv t0 = tcg_temp_new();
867 gen_mov_reg_FCC0(dst, src, fcc_offset);
868 gen_mov_reg_FCC1(t0, src, fcc_offset);
869 tcg_gen_and_tl(dst, dst, t0);
870 tcg_temp_free(t0);
873 // 0: !(FCC0 | FCC1)
874 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
875 unsigned int fcc_offset)
877 TCGv t0 = tcg_temp_new();
878 gen_mov_reg_FCC0(dst, src, fcc_offset);
879 gen_mov_reg_FCC1(t0, src, fcc_offset);
880 tcg_gen_or_tl(dst, dst, t0);
881 tcg_gen_xori_tl(dst, dst, 0x1);
882 tcg_temp_free(t0);
885 // 0 or 3: !(FCC0 ^ FCC1)
886 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
887 unsigned int fcc_offset)
889 TCGv t0 = tcg_temp_new();
890 gen_mov_reg_FCC0(dst, src, fcc_offset);
891 gen_mov_reg_FCC1(t0, src, fcc_offset);
892 tcg_gen_xor_tl(dst, dst, t0);
893 tcg_gen_xori_tl(dst, dst, 0x1);
894 tcg_temp_free(t0);
897 // 0 or 2: !FCC0
898 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
899 unsigned int fcc_offset)
901 gen_mov_reg_FCC0(dst, src, fcc_offset);
902 tcg_gen_xori_tl(dst, dst, 0x1);
905 // !1: !(FCC0 & !FCC1)
906 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
907 unsigned int fcc_offset)
909 TCGv t0 = tcg_temp_new();
910 gen_mov_reg_FCC0(dst, src, fcc_offset);
911 gen_mov_reg_FCC1(t0, src, fcc_offset);
912 tcg_gen_andc_tl(dst, dst, t0);
913 tcg_gen_xori_tl(dst, dst, 0x1);
914 tcg_temp_free(t0);
917 // 0 or 1: !FCC1
918 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
919 unsigned int fcc_offset)
921 gen_mov_reg_FCC1(dst, src, fcc_offset);
922 tcg_gen_xori_tl(dst, dst, 0x1);
925 // !2: !(!FCC0 & FCC1)
926 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
927 unsigned int fcc_offset)
929 TCGv t0 = tcg_temp_new();
930 gen_mov_reg_FCC0(dst, src, fcc_offset);
931 gen_mov_reg_FCC1(t0, src, fcc_offset);
932 tcg_gen_andc_tl(dst, t0, dst);
933 tcg_gen_xori_tl(dst, dst, 0x1);
934 tcg_temp_free(t0);
937 // !3: !(FCC0 & FCC1)
938 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
939 unsigned int fcc_offset)
941 TCGv t0 = tcg_temp_new();
942 gen_mov_reg_FCC0(dst, src, fcc_offset);
943 gen_mov_reg_FCC1(t0, src, fcc_offset);
944 tcg_gen_and_tl(dst, dst, t0);
945 tcg_gen_xori_tl(dst, dst, 0x1);
946 tcg_temp_free(t0);
949 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
950 target_ulong pc2, TCGv r_cond)
952 TCGLabel *l1 = gen_new_label();
954 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
956 gen_goto_tb(dc, 0, pc1, pc1 + 4);
958 gen_set_label(l1);
959 gen_goto_tb(dc, 1, pc2, pc2 + 4);
962 static void gen_branch_a(DisasContext *dc, target_ulong pc1)
964 TCGLabel *l1 = gen_new_label();
965 target_ulong npc = dc->npc;
967 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
969 gen_goto_tb(dc, 0, npc, pc1);
971 gen_set_label(l1);
972 gen_goto_tb(dc, 1, npc + 4, npc + 8);
974 dc->is_br = 1;
977 static void gen_branch_n(DisasContext *dc, target_ulong pc1)
979 target_ulong npc = dc->npc;
981 if (likely(npc != DYNAMIC_PC)) {
982 dc->pc = npc;
983 dc->jump_pc[0] = pc1;
984 dc->jump_pc[1] = npc + 4;
985 dc->npc = JUMP_PC;
986 } else {
987 TCGv t, z;
989 tcg_gen_mov_tl(cpu_pc, cpu_npc);
991 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
992 t = tcg_const_tl(pc1);
993 z = tcg_const_tl(0);
994 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
995 tcg_temp_free(t);
996 tcg_temp_free(z);
998 dc->pc = DYNAMIC_PC;
1002 static inline void gen_generic_branch(DisasContext *dc)
1004 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1005 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1006 TCGv zero = tcg_const_tl(0);
1008 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1010 tcg_temp_free(npc0);
1011 tcg_temp_free(npc1);
1012 tcg_temp_free(zero);
1015 /* Call this function before using the condition register, as it may
1016 have been set for a jump. */
1017 static inline void flush_cond(DisasContext *dc)
1019 if (dc->npc == JUMP_PC) {
1020 gen_generic_branch(dc);
1021 dc->npc = DYNAMIC_PC;
1025 static inline void save_npc(DisasContext *dc)
1027 if (dc->npc == JUMP_PC) {
1028 gen_generic_branch(dc);
1029 dc->npc = DYNAMIC_PC;
1030 } else if (dc->npc != DYNAMIC_PC) {
1031 tcg_gen_movi_tl(cpu_npc, dc->npc);
1035 static inline void update_psr(DisasContext *dc)
1037 if (dc->cc_op != CC_OP_FLAGS) {
1038 dc->cc_op = CC_OP_FLAGS;
1039 gen_helper_compute_psr(cpu_env);
1043 static inline void save_state(DisasContext *dc)
1045 tcg_gen_movi_tl(cpu_pc, dc->pc);
1046 save_npc(dc);
1049 static void gen_exception(DisasContext *dc, int which)
1051 TCGv_i32 t;
1053 save_state(dc);
1054 t = tcg_const_i32(which);
1055 gen_helper_raise_exception(cpu_env, t);
1056 tcg_temp_free_i32(t);
1057 dc->is_br = 1;
1060 static inline void gen_mov_pc_npc(DisasContext *dc)
1062 if (dc->npc == JUMP_PC) {
1063 gen_generic_branch(dc);
1064 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1065 dc->pc = DYNAMIC_PC;
1066 } else if (dc->npc == DYNAMIC_PC) {
1067 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1068 dc->pc = DYNAMIC_PC;
1069 } else {
1070 dc->pc = dc->npc;
1074 static inline void gen_op_next_insn(void)
1076 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1077 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1080 static void free_compare(DisasCompare *cmp)
1082 if (!cmp->g1) {
1083 tcg_temp_free(cmp->c1);
1085 if (!cmp->g2) {
1086 tcg_temp_free(cmp->c2);
1090 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1091 DisasContext *dc)
1093 static int subcc_cond[16] = {
1094 TCG_COND_NEVER,
1095 TCG_COND_EQ,
1096 TCG_COND_LE,
1097 TCG_COND_LT,
1098 TCG_COND_LEU,
1099 TCG_COND_LTU,
1100 -1, /* neg */
1101 -1, /* overflow */
1102 TCG_COND_ALWAYS,
1103 TCG_COND_NE,
1104 TCG_COND_GT,
1105 TCG_COND_GE,
1106 TCG_COND_GTU,
1107 TCG_COND_GEU,
1108 -1, /* pos */
1109 -1, /* no overflow */
1110 };
1112 static int logic_cond[16] = {
1113 TCG_COND_NEVER,
1114 TCG_COND_EQ, /* eq: Z */
1115 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1116 TCG_COND_LT, /* lt: N ^ V -> N */
1117 TCG_COND_EQ, /* leu: C | Z -> Z */
1118 TCG_COND_NEVER, /* ltu: C -> 0 */
1119 TCG_COND_LT, /* neg: N */
1120 TCG_COND_NEVER, /* vs: V -> 0 */
1121 TCG_COND_ALWAYS,
1122 TCG_COND_NE, /* ne: !Z */
1123 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1124 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1125 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1126 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1127 TCG_COND_GE, /* pos: !N */
1128 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1129 };
1131 TCGv_i32 r_src;
1132 TCGv r_dst;
1134 #ifdef TARGET_SPARC64
1135 if (xcc) {
1136 r_src = cpu_xcc;
1137 } else {
1138 r_src = cpu_psr;
1140 #else
1141 r_src = cpu_psr;
1142 #endif
1144 switch (dc->cc_op) {
1145 case CC_OP_LOGIC:
1146 cmp->cond = logic_cond[cond];
1147 do_compare_dst_0:
1148 cmp->is_bool = false;
1149 cmp->g2 = false;
1150 cmp->c2 = tcg_const_tl(0);
1151 #ifdef TARGET_SPARC64
1152 if (!xcc) {
1153 cmp->g1 = false;
1154 cmp->c1 = tcg_temp_new();
1155 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1156 break;
1158 #endif
1159 cmp->g1 = true;
1160 cmp->c1 = cpu_cc_dst;
1161 break;
1163 case CC_OP_SUB:
1164 switch (cond) {
1165 case 6: /* neg */
1166 case 14: /* pos */
1167 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1168 goto do_compare_dst_0;
1170 case 7: /* overflow */
1171 case 15: /* !overflow */
1172 goto do_dynamic;
1174 default:
1175 cmp->cond = subcc_cond[cond];
1176 cmp->is_bool = false;
1177 #ifdef TARGET_SPARC64
1178 if (!xcc) {
1179 /* Note that sign-extension works for unsigned compares as
1180 long as both operands are sign-extended. */
1181 cmp->g1 = cmp->g2 = false;
1182 cmp->c1 = tcg_temp_new();
1183 cmp->c2 = tcg_temp_new();
1184 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1185 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1186 break;
1188 #endif
1189 cmp->g1 = cmp->g2 = true;
1190 cmp->c1 = cpu_cc_src;
1191 cmp->c2 = cpu_cc_src2;
1192 break;
1194 break;
1196 default:
1197 do_dynamic:
1198 gen_helper_compute_psr(cpu_env);
1199 dc->cc_op = CC_OP_FLAGS;
1200 /* FALLTHRU */
1202 case CC_OP_FLAGS:
1203 /* We're going to generate a boolean result. */
1204 cmp->cond = TCG_COND_NE;
1205 cmp->is_bool = true;
1206 cmp->g1 = cmp->g2 = false;
1207 cmp->c1 = r_dst = tcg_temp_new();
1208 cmp->c2 = tcg_const_tl(0);
1210 switch (cond) {
1211 case 0x0:
1212 gen_op_eval_bn(r_dst);
1213 break;
1214 case 0x1:
1215 gen_op_eval_be(r_dst, r_src);
1216 break;
1217 case 0x2:
1218 gen_op_eval_ble(r_dst, r_src);
1219 break;
1220 case 0x3:
1221 gen_op_eval_bl(r_dst, r_src);
1222 break;
1223 case 0x4:
1224 gen_op_eval_bleu(r_dst, r_src);
1225 break;
1226 case 0x5:
1227 gen_op_eval_bcs(r_dst, r_src);
1228 break;
1229 case 0x6:
1230 gen_op_eval_bneg(r_dst, r_src);
1231 break;
1232 case 0x7:
1233 gen_op_eval_bvs(r_dst, r_src);
1234 break;
1235 case 0x8:
1236 gen_op_eval_ba(r_dst);
1237 break;
1238 case 0x9:
1239 gen_op_eval_bne(r_dst, r_src);
1240 break;
1241 case 0xa:
1242 gen_op_eval_bg(r_dst, r_src);
1243 break;
1244 case 0xb:
1245 gen_op_eval_bge(r_dst, r_src);
1246 break;
1247 case 0xc:
1248 gen_op_eval_bgu(r_dst, r_src);
1249 break;
1250 case 0xd:
1251 gen_op_eval_bcc(r_dst, r_src);
1252 break;
1253 case 0xe:
1254 gen_op_eval_bpos(r_dst, r_src);
1255 break;
1256 case 0xf:
1257 gen_op_eval_bvc(r_dst, r_src);
1258 break;
1260 break;
1264 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1266 unsigned int offset;
1267 TCGv r_dst;
1269 /* For now we still generate a straight boolean result. */
1270 cmp->cond = TCG_COND_NE;
1271 cmp->is_bool = true;
1272 cmp->g1 = cmp->g2 = false;
1273 cmp->c1 = r_dst = tcg_temp_new();
1274 cmp->c2 = tcg_const_tl(0);
1276 switch (cc) {
1277 default:
1278 case 0x0:
1279 offset = 0;
1280 break;
1281 case 0x1:
1282 offset = 32 - 10;
1283 break;
1284 case 0x2:
1285 offset = 34 - 10;
1286 break;
1287 case 0x3:
1288 offset = 36 - 10;
1289 break;
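/* The offsets are relative to FSR_FCC0_SHIFT: fcc1, fcc2 and fcc3 live
   22, 24 and 26 bits above fcc0, i.e. at FSR bits 32, 34 and 36 given
   fcc0 at bit 10, hence the "32 - 10" style constants above. */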
1292 switch (cond) {
1293 case 0x0:
1294 gen_op_eval_bn(r_dst);
1295 break;
1296 case 0x1:
1297 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1298 break;
1299 case 0x2:
1300 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1301 break;
1302 case 0x3:
1303 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1304 break;
1305 case 0x4:
1306 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1307 break;
1308 case 0x5:
1309 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1310 break;
1311 case 0x6:
1312 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1313 break;
1314 case 0x7:
1315 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1316 break;
1317 case 0x8:
1318 gen_op_eval_ba(r_dst);
1319 break;
1320 case 0x9:
1321 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1322 break;
1323 case 0xa:
1324 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1325 break;
1326 case 0xb:
1327 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1328 break;
1329 case 0xc:
1330 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1331 break;
1332 case 0xd:
1333 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1334 break;
1335 case 0xe:
1336 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1337 break;
1338 case 0xf:
1339 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1340 break;
1344 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1345 DisasContext *dc)
1347 DisasCompare cmp;
1348 gen_compare(&cmp, cc, cond, dc);
1350 /* The interface is to return a boolean in r_dst. */
1351 if (cmp.is_bool) {
1352 tcg_gen_mov_tl(r_dst, cmp.c1);
1353 } else {
1354 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1357 free_compare(&cmp);
1360 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1362 DisasCompare cmp;
1363 gen_fcompare(&cmp, cc, cond);
1365 /* The interface is to return a boolean in r_dst. */
1366 if (cmp.is_bool) {
1367 tcg_gen_mov_tl(r_dst, cmp.c1);
1368 } else {
1369 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1372 free_compare(&cmp);
1375 #ifdef TARGET_SPARC64
1376 // Inverted logic
1377 static const int gen_tcg_cond_reg[8] = {
1378 -1,
1379 TCG_COND_NE,
1380 TCG_COND_GT,
1381 TCG_COND_GE,
1382 -1,
1383 TCG_COND_EQ,
1384 TCG_COND_LE,
1385 TCG_COND_LT,
1386 };
1388 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1390 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1391 cmp->is_bool = false;
1392 cmp->g1 = true;
1393 cmp->g2 = false;
1394 cmp->c1 = r_src;
1395 cmp->c2 = tcg_const_tl(0);
1398 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1400 DisasCompare cmp;
1401 gen_compare_reg(&cmp, cond, r_src);
1403 /* The interface is to return a boolean in r_dst. */
1404 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1406 free_compare(&cmp);
1408 #endif
1410 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1412 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1413 target_ulong target = dc->pc + offset;
1415 #ifdef TARGET_SPARC64
1416 if (unlikely(AM_CHECK(dc))) {
1417 target &= 0xffffffffULL;
1419 #endif
1420 if (cond == 0x0) {
1421 /* unconditional not taken */
1422 if (a) {
1423 dc->pc = dc->npc + 4;
1424 dc->npc = dc->pc + 4;
1425 } else {
1426 dc->pc = dc->npc;
1427 dc->npc = dc->pc + 4;
1429 } else if (cond == 0x8) {
1430 /* unconditional taken */
1431 if (a) {
1432 dc->pc = target;
1433 dc->npc = dc->pc + 4;
1434 } else {
1435 dc->pc = dc->npc;
1436 dc->npc = target;
1437 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1439 } else {
1440 flush_cond(dc);
1441 gen_cond(cpu_cond, cc, cond, dc);
1442 if (a) {
1443 gen_branch_a(dc, target);
1444 } else {
1445 gen_branch_n(dc, target);
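/* The annul bit changes delay-slot handling: an annulled unconditional
   branch skips the slot entirely, whether taken (pc = target) or not
   taken (pc = npc + 4), while an annulled conditional branch executes
   the slot only when taken (gen_branch_a versus gen_branch_n). */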
1450 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1452 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1453 target_ulong target = dc->pc + offset;
1455 #ifdef TARGET_SPARC64
1456 if (unlikely(AM_CHECK(dc))) {
1457 target &= 0xffffffffULL;
1459 #endif
1460 if (cond == 0x0) {
1461 /* unconditional not taken */
1462 if (a) {
1463 dc->pc = dc->npc + 4;
1464 dc->npc = dc->pc + 4;
1465 } else {
1466 dc->pc = dc->npc;
1467 dc->npc = dc->pc + 4;
1469 } else if (cond == 0x8) {
1470 /* unconditional taken */
1471 if (a) {
1472 dc->pc = target;
1473 dc->npc = dc->pc + 4;
1474 } else {
1475 dc->pc = dc->npc;
1476 dc->npc = target;
1477 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1479 } else {
1480 flush_cond(dc);
1481 gen_fcond(cpu_cond, cc, cond);
1482 if (a) {
1483 gen_branch_a(dc, target);
1484 } else {
1485 gen_branch_n(dc, target);
1490 #ifdef TARGET_SPARC64
1491 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1492 TCGv r_reg)
1494 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1495 target_ulong target = dc->pc + offset;
1497 if (unlikely(AM_CHECK(dc))) {
1498 target &= 0xffffffffULL;
1500 flush_cond(dc);
1501 gen_cond_reg(cpu_cond, cond, r_reg);
1502 if (a) {
1503 gen_branch_a(dc, target);
1504 } else {
1505 gen_branch_n(dc, target);
1509 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1511 switch (fccno) {
1512 case 0:
1513 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1514 break;
1515 case 1:
1516 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1517 break;
1518 case 2:
1519 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1520 break;
1521 case 3:
1522 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1523 break;
1527 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1529 switch (fccno) {
1530 case 0:
1531 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1532 break;
1533 case 1:
1534 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1535 break;
1536 case 2:
1537 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1538 break;
1539 case 3:
1540 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1541 break;
1545 static inline void gen_op_fcmpq(int fccno)
1547 switch (fccno) {
1548 case 0:
1549 gen_helper_fcmpq(cpu_env);
1550 break;
1551 case 1:
1552 gen_helper_fcmpq_fcc1(cpu_env);
1553 break;
1554 case 2:
1555 gen_helper_fcmpq_fcc2(cpu_env);
1556 break;
1557 case 3:
1558 gen_helper_fcmpq_fcc3(cpu_env);
1559 break;
1563 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1565 switch (fccno) {
1566 case 0:
1567 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1568 break;
1569 case 1:
1570 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1571 break;
1572 case 2:
1573 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1574 break;
1575 case 3:
1576 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1577 break;
1581 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1583 switch (fccno) {
1584 case 0:
1585 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1586 break;
1587 case 1:
1588 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1589 break;
1590 case 2:
1591 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1592 break;
1593 case 3:
1594 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1595 break;
1599 static inline void gen_op_fcmpeq(int fccno)
1601 switch (fccno) {
1602 case 0:
1603 gen_helper_fcmpeq(cpu_env);
1604 break;
1605 case 1:
1606 gen_helper_fcmpeq_fcc1(cpu_env);
1607 break;
1608 case 2:
1609 gen_helper_fcmpeq_fcc2(cpu_env);
1610 break;
1611 case 3:
1612 gen_helper_fcmpeq_fcc3(cpu_env);
1613 break;
1617 #else
1619 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1621 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1624 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1626 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1629 static inline void gen_op_fcmpq(int fccno)
1631 gen_helper_fcmpq(cpu_env);
1634 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1636 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1639 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1641 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1644 static inline void gen_op_fcmpeq(int fccno)
1646 gen_helper_fcmpeq(cpu_env);
1648 #endif
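/* Pre-v9 SPARC has a single fcc field in the FSR, so the fccno argument
   is ignored in the fallback definitions above. */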
1650 static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
1652 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1653 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1654 gen_exception(dc, TT_FP_EXCP);
1657 static int gen_trap_ifnofpu(DisasContext *dc)
1659 #if !defined(CONFIG_USER_ONLY)
1660 if (!dc->fpu_enabled) {
1661 gen_exception(dc, TT_NFPU_INSN);
1662 return 1;
1664 #endif
1665 return 0;
1668 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1670 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1673 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1674 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1676 TCGv_i32 dst, src;
1678 src = gen_load_fpr_F(dc, rs);
1679 dst = gen_dest_fpr_F(dc);
1681 gen(dst, cpu_env, src);
1683 gen_store_fpr_F(dc, rd, dst);
1686 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1687 void (*gen)(TCGv_i32, TCGv_i32))
1689 TCGv_i32 dst, src;
1691 src = gen_load_fpr_F(dc, rs);
1692 dst = gen_dest_fpr_F(dc);
1694 gen(dst, src);
1696 gen_store_fpr_F(dc, rd, dst);
1699 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1700 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1702 TCGv_i32 dst, src1, src2;
1704 src1 = gen_load_fpr_F(dc, rs1);
1705 src2 = gen_load_fpr_F(dc, rs2);
1706 dst = gen_dest_fpr_F(dc);
1708 gen(dst, cpu_env, src1, src2);
1710 gen_store_fpr_F(dc, rd, dst);
1713 #ifdef TARGET_SPARC64
1714 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1715 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1717 TCGv_i32 dst, src1, src2;
1719 src1 = gen_load_fpr_F(dc, rs1);
1720 src2 = gen_load_fpr_F(dc, rs2);
1721 dst = gen_dest_fpr_F(dc);
1723 gen(dst, src1, src2);
1725 gen_store_fpr_F(dc, rd, dst);
1727 #endif
1729 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1730 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1732 TCGv_i64 dst, src;
1734 src = gen_load_fpr_D(dc, rs);
1735 dst = gen_dest_fpr_D(dc, rd);
1737 gen(dst, cpu_env, src);
1739 gen_store_fpr_D(dc, rd, dst);
1742 #ifdef TARGET_SPARC64
1743 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1744 void (*gen)(TCGv_i64, TCGv_i64))
1746 TCGv_i64 dst, src;
1748 src = gen_load_fpr_D(dc, rs);
1749 dst = gen_dest_fpr_D(dc, rd);
1751 gen(dst, src);
1753 gen_store_fpr_D(dc, rd, dst);
1755 #endif
1757 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1758 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1760 TCGv_i64 dst, src1, src2;
1762 src1 = gen_load_fpr_D(dc, rs1);
1763 src2 = gen_load_fpr_D(dc, rs2);
1764 dst = gen_dest_fpr_D(dc, rd);
1766 gen(dst, cpu_env, src1, src2);
1768 gen_store_fpr_D(dc, rd, dst);
1771 #ifdef TARGET_SPARC64
1772 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1773 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1775 TCGv_i64 dst, src1, src2;
1777 src1 = gen_load_fpr_D(dc, rs1);
1778 src2 = gen_load_fpr_D(dc, rs2);
1779 dst = gen_dest_fpr_D(dc, rd);
1781 gen(dst, src1, src2);
1783 gen_store_fpr_D(dc, rd, dst);
1786 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1787 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1789 TCGv_i64 dst, src1, src2;
1791 src1 = gen_load_fpr_D(dc, rs1);
1792 src2 = gen_load_fpr_D(dc, rs2);
1793 dst = gen_dest_fpr_D(dc, rd);
1795 gen(dst, cpu_gsr, src1, src2);
1797 gen_store_fpr_D(dc, rd, dst);
1800 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1801 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1803 TCGv_i64 dst, src0, src1, src2;
1805 src1 = gen_load_fpr_D(dc, rs1);
1806 src2 = gen_load_fpr_D(dc, rs2);
1807 src0 = gen_load_fpr_D(dc, rd);
1808 dst = gen_dest_fpr_D(dc, rd);
1810 gen(dst, src0, src1, src2);
1812 gen_store_fpr_D(dc, rd, dst);
1814 #endif
1816 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1817 void (*gen)(TCGv_ptr))
1819 gen_op_load_fpr_QT1(QFPREG(rs));
1821 gen(cpu_env);
1823 gen_op_store_QT0_fpr(QFPREG(rd));
1824 gen_update_fprs_dirty(QFPREG(rd));
1827 #ifdef TARGET_SPARC64
1828 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1829 void (*gen)(TCGv_ptr))
1831 gen_op_load_fpr_QT1(QFPREG(rs));
1833 gen(cpu_env);
1835 gen_op_store_QT0_fpr(QFPREG(rd));
1836 gen_update_fprs_dirty(QFPREG(rd));
1838 #endif
1840 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1841 void (*gen)(TCGv_ptr))
1843 gen_op_load_fpr_QT0(QFPREG(rs1));
1844 gen_op_load_fpr_QT1(QFPREG(rs2));
1846 gen(cpu_env);
1848 gen_op_store_QT0_fpr(QFPREG(rd));
1849 gen_update_fprs_dirty(QFPREG(rd));
1852 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1853 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1855 TCGv_i64 dst;
1856 TCGv_i32 src1, src2;
1858 src1 = gen_load_fpr_F(dc, rs1);
1859 src2 = gen_load_fpr_F(dc, rs2);
1860 dst = gen_dest_fpr_D(dc, rd);
1862 gen(dst, cpu_env, src1, src2);
1864 gen_store_fpr_D(dc, rd, dst);
1867 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1868 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1870 TCGv_i64 src1, src2;
1872 src1 = gen_load_fpr_D(dc, rs1);
1873 src2 = gen_load_fpr_D(dc, rs2);
1875 gen(cpu_env, src1, src2);
1877 gen_op_store_QT0_fpr(QFPREG(rd));
1878 gen_update_fprs_dirty(QFPREG(rd));
1881 #ifdef TARGET_SPARC64
1882 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1883 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1885 TCGv_i64 dst;
1886 TCGv_i32 src;
1888 src = gen_load_fpr_F(dc, rs);
1889 dst = gen_dest_fpr_D(dc, rd);
1891 gen(dst, cpu_env, src);
1893 gen_store_fpr_D(dc, rd, dst);
1895 #endif
1897 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1898 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1900 TCGv_i64 dst;
1901 TCGv_i32 src;
1903 src = gen_load_fpr_F(dc, rs);
1904 dst = gen_dest_fpr_D(dc, rd);
1906 gen(dst, cpu_env, src);
1908 gen_store_fpr_D(dc, rd, dst);
1911 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1912 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1914 TCGv_i32 dst;
1915 TCGv_i64 src;
1917 src = gen_load_fpr_D(dc, rs);
1918 dst = gen_dest_fpr_F(dc);
1920 gen(dst, cpu_env, src);
1922 gen_store_fpr_F(dc, rd, dst);
1925 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1926 void (*gen)(TCGv_i32, TCGv_ptr))
1928 TCGv_i32 dst;
1930 gen_op_load_fpr_QT1(QFPREG(rs));
1931 dst = gen_dest_fpr_F(dc);
1933 gen(dst, cpu_env);
1935 gen_store_fpr_F(dc, rd, dst);
1938 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1939 void (*gen)(TCGv_i64, TCGv_ptr))
1941 TCGv_i64 dst;
1943 gen_op_load_fpr_QT1(QFPREG(rs));
1944 dst = gen_dest_fpr_D(dc, rd);
1946 gen(dst, cpu_env);
1948 gen_store_fpr_D(dc, rd, dst);
1951 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1952 void (*gen)(TCGv_ptr, TCGv_i32))
1954 TCGv_i32 src;
1956 src = gen_load_fpr_F(dc, rs);
1958 gen(cpu_env, src);
1960 gen_op_store_QT0_fpr(QFPREG(rd));
1961 gen_update_fprs_dirty(QFPREG(rd));
1964 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1965 void (*gen)(TCGv_ptr, TCGv_i64))
1967 TCGv_i64 src;
1969 src = gen_load_fpr_D(dc, rs);
1971 gen(cpu_env, src);
1973 gen_op_store_QT0_fpr(QFPREG(rd));
1974 gen_update_fprs_dirty(QFPREG(rd));
1977 /* asi moves */
1978 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1979 static TCGv_i32 gen_get_asi(DisasContext *dc, int insn)
1981 int asi;
1983 if (IS_IMM) {
1984 #ifdef TARGET_SPARC64
1985 asi = dc->asi;
1986 #else
1987 gen_exception(dc, TT_ILL_INSN);
1988 asi = 0;
1989 #endif
1990 } else {
1991 asi = GET_FIELD(insn, 19, 26);
1993 return tcg_const_i32(asi);
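/* With the immediate form the ASI comes from the %asi register, which on
   sparc64 this commit caches in dc->asi via the TB flags; sparc32 has no
   %asi register, so the immediate form raises an illegal insn trap. */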
1996 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
1997 int insn, int size, int sign)
1999 TCGv_i32 r_asi, r_size, r_sign;
2001 r_asi = gen_get_asi(dc, insn);
2002 r_size = tcg_const_i32(size);
2003 r_sign = tcg_const_i32(sign);
2004 #ifdef TARGET_SPARC64
2005 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
2006 #else
2008 TCGv_i64 t64 = tcg_temp_new_i64();
2009 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2010 tcg_gen_trunc_i64_tl(dst, t64);
2011 tcg_temp_free_i64(t64);
2013 #endif
2014 tcg_temp_free_i32(r_sign);
2015 tcg_temp_free_i32(r_size);
2016 tcg_temp_free_i32(r_asi);
2019 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2020 int insn, int size)
2022 TCGv_i32 r_asi, r_size;
2024 r_asi = gen_get_asi(dc, insn);
2025 r_size = tcg_const_i32(size);
2026 #ifdef TARGET_SPARC64
2027 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2028 #else
2030 TCGv_i64 t64 = tcg_temp_new_i64();
2031 tcg_gen_extu_tl_i64(t64, src);
2032 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2033 tcg_temp_free_i64(t64);
2035 #endif
2036 tcg_temp_free_i32(r_size);
2037 tcg_temp_free_i32(r_asi);
2040 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2041 TCGv addr, int insn)
2043 TCGv_i32 r_asi, r_size, r_sign;
2044 TCGv_i64 s64, t64 = tcg_temp_new_i64();
2046 r_asi = gen_get_asi(dc, insn);
2047 r_size = tcg_const_i32(4);
2048 r_sign = tcg_const_i32(0);
2049 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2050 tcg_temp_free_i32(r_sign);
2052 s64 = tcg_temp_new_i64();
2053 tcg_gen_extu_tl_i64(s64, src);
2054 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_size);
2055 tcg_temp_free_i64(s64);
2056 tcg_temp_free_i32(r_size);
2057 tcg_temp_free_i32(r_asi);
2059 tcg_gen_trunc_i64_tl(dst, t64);
2060 tcg_temp_free_i64(t64);
2063 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv val2,
2064 int insn, int rd)
2066 TCGv val1 = gen_load_gpr(dc, rd);
2067 TCGv dst = gen_dest_gpr(dc, rd);
2068 TCGv_i32 r_asi = gen_get_asi(dc, insn);
2070 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2071 tcg_temp_free_i32(r_asi);
2072 gen_store_gpr(dc, rd, dst);
2075 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2077 TCGv_i32 r_asi, r_size, r_sign;
2078 TCGv_i64 s64, d64 = tcg_temp_new_i64();
2080 r_asi = gen_get_asi(dc, insn);
2081 r_size = tcg_const_i32(1);
2082 r_sign = tcg_const_i32(0);
2083 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_size, r_sign);
2084 tcg_temp_free_i32(r_sign);
2086 s64 = tcg_const_i64(0xff);
2087 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_size);
2088 tcg_temp_free_i64(s64);
2089 tcg_temp_free_i32(r_size);
2090 tcg_temp_free_i32(r_asi);
2092 tcg_gen_trunc_i64_tl(dst, d64);
2093 tcg_temp_free_i64(d64);
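/* LDSTUB is the classic SPARC lock primitive: it reads one byte and
   stores 0xff to the same address, emulated here as a load helper
   followed by a store helper. */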
2095 #endif
2097 #ifdef TARGET_SPARC64
2098 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2099 int insn, int size, int rd)
2101 TCGv_i32 r_asi, r_size, r_rd;
2103 r_asi = gen_get_asi(dc, insn);
2104 r_size = tcg_const_i32(size);
2105 r_rd = tcg_const_i32(rd);
2106 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2107 tcg_temp_free_i32(r_rd);
2108 tcg_temp_free_i32(r_size);
2109 tcg_temp_free_i32(r_asi);
2112 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2113 int insn, int size, int rd)
2115 TCGv_i32 r_asi, r_size, r_rd;
2117 r_asi = gen_get_asi(dc, insn);
2118 r_size = tcg_const_i32(size);
2119 r_rd = tcg_const_i32(rd);
2120 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2121 tcg_temp_free_i32(r_rd);
2122 tcg_temp_free_i32(r_size);
2123 tcg_temp_free_i32(r_asi);
2126 static void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2127 int insn, int rd)
2129 TCGv_i32 r_asi, r_rd;
2131 r_asi = gen_get_asi(dc, insn);
2132 r_rd = tcg_const_i32(rd);
2133 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
2134 tcg_temp_free_i32(r_rd);
2135 tcg_temp_free_i32(r_asi);
2138 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2139 int insn, int rd)
2141 TCGv_i32 r_asi, r_size;
2142 TCGv lo = gen_load_gpr(dc, rd + 1);
2143 TCGv_i64 t64 = tcg_temp_new_i64();
2145 tcg_gen_concat_tl_i64(t64, lo, hi);
2146 r_asi = gen_get_asi(dc, insn);
2147 r_size = tcg_const_i32(8);
2148 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2149 tcg_temp_free_i32(r_size);
2150 tcg_temp_free_i32(r_asi);
2151 tcg_temp_free_i64(t64);
2154 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv val2,
2155 int insn, int rd)
2157 TCGv val1 = gen_load_gpr(dc, rd);
2158 TCGv dst = gen_dest_gpr(dc, rd);
2159 TCGv_i32 r_asi = gen_get_asi(dc, insn);
2161 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2162 tcg_temp_free_i32(r_asi);
2163 gen_store_gpr(dc, rd, dst);
2166 #elif !defined(CONFIG_USER_ONLY)
2167 static void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2168 int insn, int rd)
2170 TCGv_i32 r_asi, r_size, r_sign;
2171 TCGv t;
2172 TCGv_i64 t64;
2174 r_asi = gen_get_asi(dc, insn);
2175 r_size = tcg_const_i32(8);
2176 r_sign = tcg_const_i32(0);
2177 t64 = tcg_temp_new_i64();
2178 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2179 tcg_temp_free_i32(r_sign);
2180 tcg_temp_free_i32(r_size);
2181 tcg_temp_free_i32(r_asi);
2183 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2184 whereby "rd + 1" elicits "error: array subscript is above array".
2185 Since we have already asserted that rd is even, the semantics
2186 are unchanged. */
2187 t = gen_dest_gpr(dc, rd | 1);
2188 tcg_gen_trunc_i64_tl(t, t64);
2189 gen_store_gpr(dc, rd | 1, t);
2191 tcg_gen_shri_i64(t64, t64, 32);
2192 tcg_gen_trunc_i64_tl(hi, t64);
2193 tcg_temp_free_i64(t64);
2194 gen_store_gpr(dc, rd, hi);
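/* LDDA fills an even/odd register pair: the low word of the loaded
   64-bit value goes to rd | 1 and the high word to rd. */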
2197 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2198 int insn, int rd)
2200 TCGv_i32 r_asi, r_size;
2201 TCGv lo = gen_load_gpr(dc, rd + 1);
2202 TCGv_i64 t64 = tcg_temp_new_i64();
2204 tcg_gen_concat_tl_i64(t64, lo, hi);
2205 r_asi = gen_get_asi(dc, insn);
2206 r_size = tcg_const_i32(8);
2207 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2208 tcg_temp_free_i32(r_size);
2209 tcg_temp_free_i32(r_asi);
2210 tcg_temp_free_i64(t64);
2212 #endif
2214 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2216 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2217 return gen_load_gpr(dc, rs1);
2220 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2222 if (IS_IMM) { /* immediate */
2223 target_long simm = GET_FIELDs(insn, 19, 31);
2224 TCGv t = get_temp_tl(dc);
2225 tcg_gen_movi_tl(t, simm);
2226 return t;
2227 } else { /* register */
2228 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2229 return gen_load_gpr(dc, rs2);
2233 #ifdef TARGET_SPARC64
2234 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2236 TCGv_i32 c32, zero, dst, s1, s2;
2238 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2239 or fold the comparison down to 32 bits and use movcond_i32. Choose
2240 the latter. */
2241 c32 = tcg_temp_new_i32();
2242 if (cmp->is_bool) {
2243 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2244 } else {
2245 TCGv_i64 c64 = tcg_temp_new_i64();
2246 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2247 tcg_gen_extrl_i64_i32(c32, c64);
2248 tcg_temp_free_i64(c64);
2251 s1 = gen_load_fpr_F(dc, rs);
2252 s2 = gen_load_fpr_F(dc, rd);
2253 dst = gen_dest_fpr_F(dc);
2254 zero = tcg_const_i32(0);
2256 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2258 tcg_temp_free_i32(c32);
2259 tcg_temp_free_i32(zero);
2260 gen_store_fpr_F(dc, rd, dst);
2263 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2265 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2266 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2267 gen_load_fpr_D(dc, rs),
2268 gen_load_fpr_D(dc, rd));
2269 gen_store_fpr_D(dc, rd, dst);
2272 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2274 int qd = QFPREG(rd);
2275 int qs = QFPREG(rs);
2277 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2278 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2279 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2280 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2282 gen_update_fprs_dirty(qd);
2285 #ifndef CONFIG_USER_ONLY
2286 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2288 TCGv_i32 r_tl = tcg_temp_new_i32();
2290 /* load env->tl into r_tl */
2291 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2293 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2294 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2296 /* calculate offset to current trap state from env->ts, reuse r_tl */
2297 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2298 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2300 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2302 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2303 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2304 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2305 tcg_temp_free_ptr(r_tl_tmp);
2308 tcg_temp_free_i32(r_tl);
2310 #endif
2312 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2313 int width, bool cc, bool left)
2315 TCGv lo1, lo2, t1, t2;
2316 uint64_t amask, tabl, tabr;
2317 int shift, imask, omask;
2319 if (cc) {
2320 tcg_gen_mov_tl(cpu_cc_src, s1);
2321 tcg_gen_mov_tl(cpu_cc_src2, s2);
2322 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2323 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2324 dc->cc_op = CC_OP_SUB;
2327 /* Theory of operation: there are two tables, left and right (not to
2328 be confused with the left and right versions of the opcode). These
2329 are indexed by the low 3 bits of the inputs. To make things "easy",
2330 these tables are loaded into two constants, TABL and TABR below.
2331 The operation index = (input & imask) << shift calculates the index
2332 into the constant, while val = (table >> index) & omask calculates
2333 the value we're looking for. */
2334 switch (width) {
2335 case 8:
2336 imask = 0x7;
2337 shift = 3;
2338 omask = 0xff;
2339 if (left) {
2340 tabl = 0x80c0e0f0f8fcfeffULL;
2341 tabr = 0xff7f3f1f0f070301ULL;
2342 } else {
2343 tabl = 0x0103070f1f3f7fffULL;
2344 tabr = 0xfffefcf8f0e0c080ULL;
2346 break;
2347 case 16:
2348 imask = 0x6;
2349 shift = 1;
2350 omask = 0xf;
2351 if (left) {
2352 tabl = 0x8cef;
2353 tabr = 0xf731;
2354 } else {
2355 tabl = 0x137f;
2356 tabr = 0xfec8;
2358 break;
2359 case 32:
2360 imask = 0x4;
2361 shift = 0;
2362 omask = 0x3;
2363 if (left) {
2364 tabl = (2 << 2) | 3;
2365 tabr = (3 << 2) | 1;
2366 } else {
2367 tabl = (1 << 2) | 3;
2368 tabr = (3 << 2) | 2;
2369 }
2370 break;
2371 default:
2372 abort();
2373 }
2375 lo1 = tcg_temp_new();
2376 lo2 = tcg_temp_new();
2377 tcg_gen_andi_tl(lo1, s1, imask);
2378 tcg_gen_andi_tl(lo2, s2, imask);
2379 tcg_gen_shli_tl(lo1, lo1, shift);
2380 tcg_gen_shli_tl(lo2, lo2, shift);
2382 t1 = tcg_const_tl(tabl);
2383 t2 = tcg_const_tl(tabr);
2384 tcg_gen_shr_tl(lo1, t1, lo1);
2385 tcg_gen_shr_tl(lo2, t2, lo2);
2386 tcg_gen_andi_tl(dst, lo1, omask);
2387 tcg_gen_andi_tl(lo2, lo2, omask);
2389 amask = -8;
2390 if (AM_CHECK(dc)) {
2391 amask &= 0xffffffffULL;
2392 }
2393 tcg_gen_andi_tl(s1, s1, amask);
2394 tcg_gen_andi_tl(s2, s2, amask);
2396 /* We want to compute
2397 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2398 We've already done dst = lo1, so this reduces to
2399 dst &= (s1 == s2 ? -1 : lo2)
2400 Which we perform by
2401 lo2 |= -(s1 == s2)
2402 dst &= lo2
2403 */
2404 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2405 tcg_gen_neg_tl(t1, t1);
2406 tcg_gen_or_tl(lo2, lo2, t1);
2407 tcg_gen_and_tl(dst, dst, lo2);
2409 tcg_temp_free(lo1);
2410 tcg_temp_free(lo2);
2411 tcg_temp_free(t1);
2412 tcg_temp_free(t2);
2413 }
2415 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2416 {
2417 TCGv tmp = tcg_temp_new();
2419 tcg_gen_add_tl(tmp, s1, s2);
2420 tcg_gen_andi_tl(dst, tmp, -8);
2421 if (left) {
2422 tcg_gen_neg_tl(tmp, tmp);
2423 }
2424 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
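/* The 3-bit value deposited into GSR.align above is the byte offset
   that gen_faligndata below uses to extract 8 contiguous bytes from
   an s1:s2 register pair; the "left" variant deposits the low bits
   of the negated sum instead. */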
2426 tcg_temp_free(tmp);
2427 }
2429 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2430 {
2431 TCGv t1, t2, shift;
2433 t1 = tcg_temp_new();
2434 t2 = tcg_temp_new();
2435 shift = tcg_temp_new();
2437 tcg_gen_andi_tl(shift, gsr, 7);
2438 tcg_gen_shli_tl(shift, shift, 3);
2439 tcg_gen_shl_tl(t1, s1, shift);
2441 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2442 shift of (up to 63) followed by a constant shift of 1. */
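/* E.g. GSR.align == 0 gives shift == 0, so t2 = (s2 >> 63) >> 1 == 0
   and dst == s1, the correct result for a zero byte offset. */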
2443 tcg_gen_xori_tl(shift, shift, 63);
2444 tcg_gen_shr_tl(t2, s2, shift);
2445 tcg_gen_shri_tl(t2, t2, 1);
2447 tcg_gen_or_tl(dst, t1, t2);
2449 tcg_temp_free(t1);
2450 tcg_temp_free(t2);
2451 tcg_temp_free(shift);
2452 }
2453 #endif
2455 #define CHECK_IU_FEATURE(dc, FEATURE) \
2456 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2457 goto illegal_insn;
2458 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2459 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2460 goto nfpu_insn;
2462 /* before an instruction, dc->pc must be static, i.e. a known constant rather than DYNAMIC_PC */
2463 static void disas_sparc_insn(DisasContext *dc, unsigned int insn)
2464 {
2465 unsigned int opc, rs1, rs2, rd;
2466 TCGv cpu_src1, cpu_src2;
2467 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2468 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2469 target_long simm;
2471 opc = GET_FIELD(insn, 0, 1);
2472 rd = GET_FIELD(insn, 2, 6);
2474 switch (opc) {
2475 case 0: /* branches/sethi */
2477 unsigned int xop = GET_FIELD(insn, 7, 9);
2478 int32_t target;
2479 switch (xop) {
2480 #ifdef TARGET_SPARC64
2481 case 0x1: /* V9 BPcc */
2483 int cc;
2485 target = GET_FIELD_SP(insn, 0, 18);
2486 target = sign_extend(target, 19);
2487 target <<= 2;
2488 cc = GET_FIELD_SP(insn, 20, 21);
2489 if (cc == 0)
2490 do_branch(dc, target, insn, 0);
2491 else if (cc == 2)
2492 do_branch(dc, target, insn, 1);
2493 else
2494 goto illegal_insn;
2495 goto jmp_insn;
2497 case 0x3: /* V9 BPr */
2499 target = GET_FIELD_SP(insn, 0, 13) |
2500 (GET_FIELD_SP(insn, 20, 21) << 14);
2501 target = sign_extend(target, 16);
2502 target <<= 2;
2503 cpu_src1 = get_src1(dc, insn);
2504 do_branch_reg(dc, target, insn, cpu_src1);
2505 goto jmp_insn;
2507 case 0x5: /* V9 FBPcc */
2509 int cc = GET_FIELD_SP(insn, 20, 21);
2510 if (gen_trap_ifnofpu(dc)) {
2511 goto jmp_insn;
2513 target = GET_FIELD_SP(insn, 0, 18);
2514 target = sign_extend(target, 19);
2515 target <<= 2;
2516 do_fbranch(dc, target, insn, cc);
2517 goto jmp_insn;
2519 #else
2520 case 0x7: /* CBN+x */
2522 goto ncp_insn;
2524 #endif
2525 case 0x2: /* BN+x */
2527 target = GET_FIELD(insn, 10, 31);
2528 target = sign_extend(target, 22);
2529 target <<= 2;
2530 do_branch(dc, target, insn, 0);
2531 goto jmp_insn;
2533 case 0x6: /* FBN+x */
2535 if (gen_trap_ifnofpu(dc)) {
2536 goto jmp_insn;
2538 target = GET_FIELD(insn, 10, 31);
2539 target = sign_extend(target, 22);
2540 target <<= 2;
2541 do_fbranch(dc, target, insn, 0);
2542 goto jmp_insn;
2544 case 0x4: /* SETHI */
2545 /* Special-case %g0 because that's the canonical nop. */
2546 if (rd) {
2547 uint32_t value = GET_FIELD(insn, 10, 31);
2548 TCGv t = gen_dest_gpr(dc, rd);
2549 tcg_gen_movi_tl(t, value << 10);
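/* E.g. an imm22 field of 0x3fffff yields rd = 0x3fffff << 10
   == 0xfffffc00. */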
2550 gen_store_gpr(dc, rd, t);
2552 break;
2553 case 0x0: /* UNIMPL */
2554 default:
2555 goto illegal_insn;
2557 break;
2559 break;
2560 case 1: /* CALL */
2561 {
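/* CALL writes its own address into %o7 (r15) and jumps to
   PC + (sign-extended 30-bit word displacement * 4). */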
2562 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2563 TCGv o7 = gen_dest_gpr(dc, 15);
2565 tcg_gen_movi_tl(o7, dc->pc);
2566 gen_store_gpr(dc, 15, o7);
2567 target += dc->pc;
2568 gen_mov_pc_npc(dc);
2569 #ifdef TARGET_SPARC64
2570 if (unlikely(AM_CHECK(dc))) {
2571 target &= 0xffffffffULL;
2573 #endif
2574 dc->npc = target;
2576 goto jmp_insn;
2577 case 2: /* FPU & Logical Operations */
2579 unsigned int xop = GET_FIELD(insn, 7, 12);
2580 TCGv cpu_dst = get_temp_tl(dc);
2581 TCGv cpu_tmp0;
2583 if (xop == 0x3a) { /* generate trap */
2584 int cond = GET_FIELD(insn, 3, 6);
2585 TCGv_i32 trap;
2586 TCGLabel *l1 = NULL;
2587 int mask;
2589 if (cond == 0) {
2590 /* Trap never. */
2591 break;
2594 save_state(dc);
2596 if (cond != 8) {
2597 /* Conditional trap. */
2598 DisasCompare cmp;
2599 #ifdef TARGET_SPARC64
2600 /* V9 icc/xcc */
2601 int cc = GET_FIELD_SP(insn, 11, 12);
2602 if (cc == 0) {
2603 gen_compare(&cmp, 0, cond, dc);
2604 } else if (cc == 2) {
2605 gen_compare(&cmp, 1, cond, dc);
2606 } else {
2607 goto illegal_insn;
2609 #else
2610 gen_compare(&cmp, 0, cond, dc);
2611 #endif
2612 l1 = gen_new_label();
2613 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2614 cmp.c1, cmp.c2, l1);
2615 free_compare(&cmp);
2618 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2619 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2621 /* Don't use the normal temporaries, as they may well have
2622 gone out of scope with the branch above. While we're
2623 doing that we might as well pre-truncate to 32-bit. */
2624 trap = tcg_temp_new_i32();
2626 rs1 = GET_FIELD_SP(insn, 14, 18);
2627 if (IS_IMM) {
2628 rs2 = GET_FIELD_SP(insn, 0, 6);
2629 if (rs1 == 0) {
2630 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2631 /* Signal that the trap value is fully constant. */
2632 mask = 0;
2633 } else {
2634 TCGv t1 = gen_load_gpr(dc, rs1);
2635 tcg_gen_trunc_tl_i32(trap, t1);
2636 tcg_gen_addi_i32(trap, trap, rs2);
2638 } else {
2639 TCGv t1, t2;
2640 rs2 = GET_FIELD_SP(insn, 0, 4);
2641 t1 = gen_load_gpr(dc, rs1);
2642 t2 = gen_load_gpr(dc, rs2);
2643 tcg_gen_add_tl(t1, t1, t2);
2644 tcg_gen_trunc_tl_i32(trap, t1);
2646 if (mask != 0) {
2647 tcg_gen_andi_i32(trap, trap, mask);
2648 tcg_gen_addi_i32(trap, trap, TT_TRAP);
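/* Either way, trap now holds TT_TRAP + (trap number & mask), i.e.
   the software trap number wrapped into the Tcc vector range. */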
2651 gen_helper_raise_exception(cpu_env, trap);
2652 tcg_temp_free_i32(trap);
2654 if (cond == 8) {
2655 /* An unconditional trap ends the TB. */
2656 dc->is_br = 1;
2657 goto jmp_insn;
2658 } else {
2659 /* A conditional trap falls through to the next insn. */
2660 gen_set_label(l1);
2661 break;
2663 } else if (xop == 0x28) { /* rdy / rdasr */
2664 rs1 = GET_FIELD(insn, 13, 17);
2665 switch (rs1) {
2666 case 0: /* rdy */
2667 #ifndef TARGET_SPARC64
2668 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2669 manual, rdy on the microSPARC
2670 II */
2671 case 0x0f: /* stbar in the SPARCv8 manual,
2672 rdy on the microSPARC II */
2673 case 0x10 ... 0x1f: /* implementation-dependent in the
2674 SPARCv8 manual, rdy on the
2675 microSPARC II */
2676 /* Read Asr17 */
2677 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2678 TCGv t = gen_dest_gpr(dc, rd);
2679 /* Read Asr17 for a Leon3 monoprocessor */
2680 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2681 gen_store_gpr(dc, rd, t);
2682 break;
2684 #endif
2685 gen_store_gpr(dc, rd, cpu_y);
2686 break;
2687 #ifdef TARGET_SPARC64
2688 case 0x2: /* V9 rdccr */
2689 update_psr(dc);
2690 gen_helper_rdccr(cpu_dst, cpu_env);
2691 gen_store_gpr(dc, rd, cpu_dst);
2692 break;
2693 case 0x3: /* V9 rdasi */
2694 tcg_gen_movi_tl(cpu_dst, dc->asi);
2695 gen_store_gpr(dc, rd, cpu_dst);
2696 break;
2697 case 0x4: /* V9 rdtick */
2699 TCGv_ptr r_tickptr;
2700 TCGv_i32 r_const;
2702 r_tickptr = tcg_temp_new_ptr();
2703 r_const = tcg_const_i32(dc->mem_idx);
2704 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2705 offsetof(CPUSPARCState, tick));
2706 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2707 r_const);
2708 tcg_temp_free_ptr(r_tickptr);
2709 tcg_temp_free_i32(r_const);
2710 gen_store_gpr(dc, rd, cpu_dst);
2712 break;
2713 case 0x5: /* V9 rdpc */
2715 TCGv t = gen_dest_gpr(dc, rd);
2716 if (unlikely(AM_CHECK(dc))) {
2717 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2718 } else {
2719 tcg_gen_movi_tl(t, dc->pc);
2721 gen_store_gpr(dc, rd, t);
2723 break;
2724 case 0x6: /* V9 rdfprs */
2725 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2726 gen_store_gpr(dc, rd, cpu_dst);
2727 break;
2728 case 0xf: /* V9 membar */
2729 break; /* no effect */
2730 case 0x13: /* Graphics Status */
2731 if (gen_trap_ifnofpu(dc)) {
2732 goto jmp_insn;
2734 gen_store_gpr(dc, rd, cpu_gsr);
2735 break;
2736 case 0x16: /* Softint */
2737 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
2738 offsetof(CPUSPARCState, softint));
2739 gen_store_gpr(dc, rd, cpu_dst);
2740 break;
2741 case 0x17: /* Tick compare */
2742 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2743 break;
2744 case 0x18: /* System tick */
2746 TCGv_ptr r_tickptr;
2747 TCGv_i32 r_const;
2749 r_tickptr = tcg_temp_new_ptr();
2750 r_const = tcg_const_i32(dc->mem_idx);
2751 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2752 offsetof(CPUSPARCState, stick));
2753 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2754 r_const);
2755 tcg_temp_free_ptr(r_tickptr);
2756 tcg_temp_free_i32(r_const);
2757 gen_store_gpr(dc, rd, cpu_dst);
2759 break;
2760 case 0x19: /* System tick compare */
2761 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2762 break;
2763 case 0x10: /* Performance Control */
2764 case 0x11: /* Performance Instrumentation Counter */
2765 case 0x12: /* Dispatch Control */
2766 case 0x14: /* Softint set, WO */
2767 case 0x15: /* Softint clear, WO */
2768 #endif
2769 default:
2770 goto illegal_insn;
2772 #if !defined(CONFIG_USER_ONLY)
2773 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2774 #ifndef TARGET_SPARC64
2775 if (!supervisor(dc)) {
2776 goto priv_insn;
2778 update_psr(dc);
2779 gen_helper_rdpsr(cpu_dst, cpu_env);
2780 #else
2781 CHECK_IU_FEATURE(dc, HYPV);
2782 if (!hypervisor(dc))
2783 goto priv_insn;
2784 rs1 = GET_FIELD(insn, 13, 17);
2785 switch (rs1) {
2786 case 0: // hpstate
2787 // gen_op_rdhpstate();
2788 break;
2789 case 1: // htstate
2790 // gen_op_rdhtstate();
2791 break;
2792 case 3: // hintp
2793 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2794 break;
2795 case 5: // htba
2796 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2797 break;
2798 case 6: // hver
2799 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2800 break;
2801 case 31: // hstick_cmpr
2802 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2803 break;
2804 default:
2805 goto illegal_insn;
2807 #endif
2808 gen_store_gpr(dc, rd, cpu_dst);
2809 break;
2810 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2811 if (!supervisor(dc)) {
2812 goto priv_insn;
2814 cpu_tmp0 = get_temp_tl(dc);
2815 #ifdef TARGET_SPARC64
2816 rs1 = GET_FIELD(insn, 13, 17);
2817 switch (rs1) {
2818 case 0: // tpc
2820 TCGv_ptr r_tsptr;
2822 r_tsptr = tcg_temp_new_ptr();
2823 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2824 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2825 offsetof(trap_state, tpc));
2826 tcg_temp_free_ptr(r_tsptr);
2828 break;
2829 case 1: // tnpc
2831 TCGv_ptr r_tsptr;
2833 r_tsptr = tcg_temp_new_ptr();
2834 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2835 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2836 offsetof(trap_state, tnpc));
2837 tcg_temp_free_ptr(r_tsptr);
2839 break;
2840 case 2: // tstate
2842 TCGv_ptr r_tsptr;
2844 r_tsptr = tcg_temp_new_ptr();
2845 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2846 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2847 offsetof(trap_state, tstate));
2848 tcg_temp_free_ptr(r_tsptr);
2850 break;
2851 case 3: // tt
2853 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2855 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2856 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2857 offsetof(trap_state, tt));
2858 tcg_temp_free_ptr(r_tsptr);
2860 break;
2861 case 4: // tick
2863 TCGv_ptr r_tickptr;
2864 TCGv_i32 r_const;
2866 r_tickptr = tcg_temp_new_ptr();
2867 r_const = tcg_const_i32(dc->mem_idx);
2868 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2869 offsetof(CPUSPARCState, tick));
2870 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2871 r_tickptr, r_const);
2872 tcg_temp_free_ptr(r_tickptr);
2873 tcg_temp_free_i32(r_const);
2875 break;
2876 case 5: // tba
2877 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2878 break;
2879 case 6: // pstate
2880 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2881 offsetof(CPUSPARCState, pstate));
2882 break;
2883 case 7: // tl
2884 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2885 offsetof(CPUSPARCState, tl));
2886 break;
2887 case 8: // pil
2888 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2889 offsetof(CPUSPARCState, psrpil));
2890 break;
2891 case 9: // cwp
2892 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2893 break;
2894 case 10: // cansave
2895 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2896 offsetof(CPUSPARCState, cansave));
2897 break;
2898 case 11: // canrestore
2899 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2900 offsetof(CPUSPARCState, canrestore));
2901 break;
2902 case 12: // cleanwin
2903 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2904 offsetof(CPUSPARCState, cleanwin));
2905 break;
2906 case 13: // otherwin
2907 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2908 offsetof(CPUSPARCState, otherwin));
2909 break;
2910 case 14: // wstate
2911 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2912 offsetof(CPUSPARCState, wstate));
2913 break;
2914 case 16: // UA2005 gl
2915 CHECK_IU_FEATURE(dc, GL);
2916 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2917 offsetof(CPUSPARCState, gl));
2918 break;
2919 case 26: // UA2005 strand status
2920 CHECK_IU_FEATURE(dc, HYPV);
2921 if (!hypervisor(dc))
2922 goto priv_insn;
2923 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2924 break;
2925 case 31: // ver
2926 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2927 break;
2928 case 15: // fq
2929 default:
2930 goto illegal_insn;
2932 #else
2933 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2934 #endif
2935 gen_store_gpr(dc, rd, cpu_tmp0);
2936 break;
2937 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2938 #ifdef TARGET_SPARC64
2939 save_state(dc);
2940 gen_helper_flushw(cpu_env);
2941 #else
2942 if (!supervisor(dc))
2943 goto priv_insn;
2944 gen_store_gpr(dc, rd, cpu_tbr);
2945 #endif
2946 break;
2947 #endif
2948 } else if (xop == 0x34) { /* FPU Operations */
2949 if (gen_trap_ifnofpu(dc)) {
2950 goto jmp_insn;
2952 gen_op_clear_ieee_excp_and_FTT();
2953 rs1 = GET_FIELD(insn, 13, 17);
2954 rs2 = GET_FIELD(insn, 27, 31);
2955 xop = GET_FIELD(insn, 18, 26);
2956 save_state(dc);
2957 switch (xop) {
2958 case 0x1: /* fmovs */
2959 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2960 gen_store_fpr_F(dc, rd, cpu_src1_32);
2961 break;
2962 case 0x5: /* fnegs */
2963 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2964 break;
2965 case 0x9: /* fabss */
2966 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2967 break;
2968 case 0x29: /* fsqrts */
2969 CHECK_FPU_FEATURE(dc, FSQRT);
2970 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2971 break;
2972 case 0x2a: /* fsqrtd */
2973 CHECK_FPU_FEATURE(dc, FSQRT);
2974 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2975 break;
2976 case 0x2b: /* fsqrtq */
2977 CHECK_FPU_FEATURE(dc, FLOAT128);
2978 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2979 break;
2980 case 0x41: /* fadds */
2981 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2982 break;
2983 case 0x42: /* faddd */
2984 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2985 break;
2986 case 0x43: /* faddq */
2987 CHECK_FPU_FEATURE(dc, FLOAT128);
2988 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2989 break;
2990 case 0x45: /* fsubs */
2991 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2992 break;
2993 case 0x46: /* fsubd */
2994 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
2995 break;
2996 case 0x47: /* fsubq */
2997 CHECK_FPU_FEATURE(dc, FLOAT128);
2998 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
2999 break;
3000 case 0x49: /* fmuls */
3001 CHECK_FPU_FEATURE(dc, FMUL);
3002 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3003 break;
3004 case 0x4a: /* fmuld */
3005 CHECK_FPU_FEATURE(dc, FMUL);
3006 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3007 break;
3008 case 0x4b: /* fmulq */
3009 CHECK_FPU_FEATURE(dc, FLOAT128);
3010 CHECK_FPU_FEATURE(dc, FMUL);
3011 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3012 break;
3013 case 0x4d: /* fdivs */
3014 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3015 break;
3016 case 0x4e: /* fdivd */
3017 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3018 break;
3019 case 0x4f: /* fdivq */
3020 CHECK_FPU_FEATURE(dc, FLOAT128);
3021 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3022 break;
3023 case 0x69: /* fsmuld */
3024 CHECK_FPU_FEATURE(dc, FSMULD);
3025 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3026 break;
3027 case 0x6e: /* fdmulq */
3028 CHECK_FPU_FEATURE(dc, FLOAT128);
3029 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3030 break;
3031 case 0xc4: /* fitos */
3032 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3033 break;
3034 case 0xc6: /* fdtos */
3035 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3036 break;
3037 case 0xc7: /* fqtos */
3038 CHECK_FPU_FEATURE(dc, FLOAT128);
3039 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3040 break;
3041 case 0xc8: /* fitod */
3042 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3043 break;
3044 case 0xc9: /* fstod */
3045 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3046 break;
3047 case 0xcb: /* fqtod */
3048 CHECK_FPU_FEATURE(dc, FLOAT128);
3049 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3050 break;
3051 case 0xcc: /* fitoq */
3052 CHECK_FPU_FEATURE(dc, FLOAT128);
3053 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3054 break;
3055 case 0xcd: /* fstoq */
3056 CHECK_FPU_FEATURE(dc, FLOAT128);
3057 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3058 break;
3059 case 0xce: /* fdtoq */
3060 CHECK_FPU_FEATURE(dc, FLOAT128);
3061 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3062 break;
3063 case 0xd1: /* fstoi */
3064 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3065 break;
3066 case 0xd2: /* fdtoi */
3067 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3068 break;
3069 case 0xd3: /* fqtoi */
3070 CHECK_FPU_FEATURE(dc, FLOAT128);
3071 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3072 break;
3073 #ifdef TARGET_SPARC64
3074 case 0x2: /* V9 fmovd */
3075 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3076 gen_store_fpr_D(dc, rd, cpu_src1_64);
3077 break;
3078 case 0x3: /* V9 fmovq */
3079 CHECK_FPU_FEATURE(dc, FLOAT128);
3080 gen_move_Q(rd, rs2);
3081 break;
3082 case 0x6: /* V9 fnegd */
3083 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3084 break;
3085 case 0x7: /* V9 fnegq */
3086 CHECK_FPU_FEATURE(dc, FLOAT128);
3087 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3088 break;
3089 case 0xa: /* V9 fabsd */
3090 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3091 break;
3092 case 0xb: /* V9 fabsq */
3093 CHECK_FPU_FEATURE(dc, FLOAT128);
3094 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3095 break;
3096 case 0x81: /* V9 fstox */
3097 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3098 break;
3099 case 0x82: /* V9 fdtox */
3100 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3101 break;
3102 case 0x83: /* V9 fqtox */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3105 break;
3106 case 0x84: /* V9 fxtos */
3107 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3108 break;
3109 case 0x88: /* V9 fxtod */
3110 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3111 break;
3112 case 0x8c: /* V9 fxtoq */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3115 break;
3116 #endif
3117 default:
3118 goto illegal_insn;
3120 } else if (xop == 0x35) { /* FPU Operations */
3121 #ifdef TARGET_SPARC64
3122 int cond;
3123 #endif
3124 if (gen_trap_ifnofpu(dc)) {
3125 goto jmp_insn;
3127 gen_op_clear_ieee_excp_and_FTT();
3128 rs1 = GET_FIELD(insn, 13, 17);
3129 rs2 = GET_FIELD(insn, 27, 31);
3130 xop = GET_FIELD(insn, 18, 26);
3131 save_state(dc);
3133 #ifdef TARGET_SPARC64
3134 #define FMOVR(sz) \
3135 do { \
3136 DisasCompare cmp; \
3137 cond = GET_FIELD_SP(insn, 10, 12); \
3138 cpu_src1 = get_src1(dc, insn); \
3139 gen_compare_reg(&cmp, cond, cpu_src1); \
3140 gen_fmov##sz(dc, &cmp, rd, rs2); \
3141 free_compare(&cmp); \
3142 } while (0)
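/* E.g. FMOVR(d) expands to: build a DisasCompare from the register
   condition on rs1, conditionally move the double-precision source
   with gen_fmovd, then free the comparison temporaries. */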
3144 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3145 FMOVR(s);
3146 break;
3147 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3148 FMOVR(d);
3149 break;
3150 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3151 CHECK_FPU_FEATURE(dc, FLOAT128);
3152 FMOVR(q);
3153 break;
3155 #undef FMOVR
3156 #endif
3157 switch (xop) {
3158 #ifdef TARGET_SPARC64
3159 #define FMOVCC(fcc, sz) \
3160 do { \
3161 DisasCompare cmp; \
3162 cond = GET_FIELD_SP(insn, 14, 17); \
3163 gen_fcompare(&cmp, fcc, cond); \
3164 gen_fmov##sz(dc, &cmp, rd, rs2); \
3165 free_compare(&cmp); \
3166 } while (0)
3168 case 0x001: /* V9 fmovscc %fcc0 */
3169 FMOVCC(0, s);
3170 break;
3171 case 0x002: /* V9 fmovdcc %fcc0 */
3172 FMOVCC(0, d);
3173 break;
3174 case 0x003: /* V9 fmovqcc %fcc0 */
3175 CHECK_FPU_FEATURE(dc, FLOAT128);
3176 FMOVCC(0, q);
3177 break;
3178 case 0x041: /* V9 fmovscc %fcc1 */
3179 FMOVCC(1, s);
3180 break;
3181 case 0x042: /* V9 fmovdcc %fcc1 */
3182 FMOVCC(1, d);
3183 break;
3184 case 0x043: /* V9 fmovqcc %fcc1 */
3185 CHECK_FPU_FEATURE(dc, FLOAT128);
3186 FMOVCC(1, q);
3187 break;
3188 case 0x081: /* V9 fmovscc %fcc2 */
3189 FMOVCC(2, s);
3190 break;
3191 case 0x082: /* V9 fmovdcc %fcc2 */
3192 FMOVCC(2, d);
3193 break;
3194 case 0x083: /* V9 fmovqcc %fcc2 */
3195 CHECK_FPU_FEATURE(dc, FLOAT128);
3196 FMOVCC(2, q);
3197 break;
3198 case 0x0c1: /* V9 fmovscc %fcc3 */
3199 FMOVCC(3, s);
3200 break;
3201 case 0x0c2: /* V9 fmovdcc %fcc3 */
3202 FMOVCC(3, d);
3203 break;
3204 case 0x0c3: /* V9 fmovqcc %fcc3 */
3205 CHECK_FPU_FEATURE(dc, FLOAT128);
3206 FMOVCC(3, q);
3207 break;
3208 #undef FMOVCC
3209 #define FMOVCC(xcc, sz) \
3210 do { \
3211 DisasCompare cmp; \
3212 cond = GET_FIELD_SP(insn, 14, 17); \
3213 gen_compare(&cmp, xcc, cond, dc); \
3214 gen_fmov##sz(dc, &cmp, rd, rs2); \
3215 free_compare(&cmp); \
3216 } while (0)
3218 case 0x101: /* V9 fmovscc %icc */
3219 FMOVCC(0, s);
3220 break;
3221 case 0x102: /* V9 fmovdcc %icc */
3222 FMOVCC(0, d);
3223 break;
3224 case 0x103: /* V9 fmovqcc %icc */
3225 CHECK_FPU_FEATURE(dc, FLOAT128);
3226 FMOVCC(0, q);
3227 break;
3228 case 0x181: /* V9 fmovscc %xcc */
3229 FMOVCC(1, s);
3230 break;
3231 case 0x182: /* V9 fmovdcc %xcc */
3232 FMOVCC(1, d);
3233 break;
3234 case 0x183: /* V9 fmovqcc %xcc */
3235 CHECK_FPU_FEATURE(dc, FLOAT128);
3236 FMOVCC(1, q);
3237 break;
3238 #undef FMOVCC
3239 #endif
3240 case 0x51: /* fcmps, V9 %fcc */
3241 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3242 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3243 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3244 break;
3245 case 0x52: /* fcmpd, V9 %fcc */
3246 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3247 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3248 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3249 break;
3250 case 0x53: /* fcmpq, V9 %fcc */
3251 CHECK_FPU_FEATURE(dc, FLOAT128);
3252 gen_op_load_fpr_QT0(QFPREG(rs1));
3253 gen_op_load_fpr_QT1(QFPREG(rs2));
3254 gen_op_fcmpq(rd & 3);
3255 break;
3256 case 0x55: /* fcmpes, V9 %fcc */
3257 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3258 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3259 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3260 break;
3261 case 0x56: /* fcmped, V9 %fcc */
3262 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3263 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3264 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3265 break;
3266 case 0x57: /* fcmpeq, V9 %fcc */
3267 CHECK_FPU_FEATURE(dc, FLOAT128);
3268 gen_op_load_fpr_QT0(QFPREG(rs1));
3269 gen_op_load_fpr_QT1(QFPREG(rs2));
3270 gen_op_fcmpeq(rd & 3);
3271 break;
3272 default:
3273 goto illegal_insn;
3275 } else if (xop == 0x2) {
3276 TCGv dst = gen_dest_gpr(dc, rd);
3277 rs1 = GET_FIELD(insn, 13, 17);
3278 if (rs1 == 0) {
3279 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3280 if (IS_IMM) { /* immediate */
3281 simm = GET_FIELDs(insn, 19, 31);
3282 tcg_gen_movi_tl(dst, simm);
3283 gen_store_gpr(dc, rd, dst);
3284 } else { /* register */
3285 rs2 = GET_FIELD(insn, 27, 31);
3286 if (rs2 == 0) {
3287 tcg_gen_movi_tl(dst, 0);
3288 gen_store_gpr(dc, rd, dst);
3289 } else {
3290 cpu_src2 = gen_load_gpr(dc, rs2);
3291 gen_store_gpr(dc, rd, cpu_src2);
3294 } else {
3295 cpu_src1 = get_src1(dc, insn);
3296 if (IS_IMM) { /* immediate */
3297 simm = GET_FIELDs(insn, 19, 31);
3298 tcg_gen_ori_tl(dst, cpu_src1, simm);
3299 gen_store_gpr(dc, rd, dst);
3300 } else { /* register */
3301 rs2 = GET_FIELD(insn, 27, 31);
3302 if (rs2 == 0) {
3303 /* mov shortcut: or x, %g0, y -> mov x, y */
3304 gen_store_gpr(dc, rd, cpu_src1);
3305 } else {
3306 cpu_src2 = gen_load_gpr(dc, rs2);
3307 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3308 gen_store_gpr(dc, rd, dst);
3312 #ifdef TARGET_SPARC64
3313 } else if (xop == 0x25) { /* sll, V9 sllx */
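/* For the three shift cases below, bit 12 of the insn selects the
   64-bit V9 form (6-bit shift count) over the 32-bit form (5-bit
   count; srl/sra first zero-/sign-extend the 32-bit source). */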
3314 cpu_src1 = get_src1(dc, insn);
3315 if (IS_IMM) { /* immediate */
3316 simm = GET_FIELDs(insn, 20, 31);
3317 if (insn & (1 << 12)) {
3318 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3319 } else {
3320 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3322 } else { /* register */
3323 rs2 = GET_FIELD(insn, 27, 31);
3324 cpu_src2 = gen_load_gpr(dc, rs2);
3325 cpu_tmp0 = get_temp_tl(dc);
3326 if (insn & (1 << 12)) {
3327 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3328 } else {
3329 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3331 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3333 gen_store_gpr(dc, rd, cpu_dst);
3334 } else if (xop == 0x26) { /* srl, V9 srlx */
3335 cpu_src1 = get_src1(dc, insn);
3336 if (IS_IMM) { /* immediate */
3337 simm = GET_FIELDs(insn, 20, 31);
3338 if (insn & (1 << 12)) {
3339 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3340 } else {
3341 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3342 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3344 } else { /* register */
3345 rs2 = GET_FIELD(insn, 27, 31);
3346 cpu_src2 = gen_load_gpr(dc, rs2);
3347 cpu_tmp0 = get_temp_tl(dc);
3348 if (insn & (1 << 12)) {
3349 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3350 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3351 } else {
3352 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3353 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3354 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3357 gen_store_gpr(dc, rd, cpu_dst);
3358 } else if (xop == 0x27) { /* sra, V9 srax */
3359 cpu_src1 = get_src1(dc, insn);
3360 if (IS_IMM) { /* immediate */
3361 simm = GET_FIELDs(insn, 20, 31);
3362 if (insn & (1 << 12)) {
3363 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3364 } else {
3365 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3366 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3368 } else { /* register */
3369 rs2 = GET_FIELD(insn, 27, 31);
3370 cpu_src2 = gen_load_gpr(dc, rs2);
3371 cpu_tmp0 = get_temp_tl(dc);
3372 if (insn & (1 << 12)) {
3373 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3374 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3375 } else {
3376 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3377 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3378 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3381 gen_store_gpr(dc, rd, cpu_dst);
3382 #endif
3383 } else if (xop < 0x36) {
3384 if (xop < 0x20) {
3385 cpu_src1 = get_src1(dc, insn);
3386 cpu_src2 = get_src2(dc, insn);
3387 switch (xop & ~0x10) {
3388 case 0x0: /* add */
3389 if (xop & 0x10) {
3390 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3391 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3392 dc->cc_op = CC_OP_ADD;
3393 } else {
3394 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3396 break;
3397 case 0x1: /* and */
3398 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3399 if (xop & 0x10) {
3400 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3401 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3402 dc->cc_op = CC_OP_LOGIC;
3404 break;
3405 case 0x2: /* or */
3406 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3407 if (xop & 0x10) {
3408 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3409 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3410 dc->cc_op = CC_OP_LOGIC;
3412 break;
3413 case 0x3: /* xor */
3414 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3415 if (xop & 0x10) {
3416 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3417 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3418 dc->cc_op = CC_OP_LOGIC;
3420 break;
3421 case 0x4: /* sub */
3422 if (xop & 0x10) {
3423 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3424 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3425 dc->cc_op = CC_OP_SUB;
3426 } else {
3427 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3429 break;
3430 case 0x5: /* andn */
3431 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3432 if (xop & 0x10) {
3433 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3434 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3435 dc->cc_op = CC_OP_LOGIC;
3437 break;
3438 case 0x6: /* orn */
3439 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3440 if (xop & 0x10) {
3441 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3442 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3443 dc->cc_op = CC_OP_LOGIC;
3445 break;
3446 case 0x7: /* xorn */
3447 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3448 if (xop & 0x10) {
3449 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3450 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3451 dc->cc_op = CC_OP_LOGIC;
3453 break;
3454 case 0x8: /* addx, V9 addc */
3455 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3456 (xop & 0x10));
3457 break;
3458 #ifdef TARGET_SPARC64
3459 case 0x9: /* V9 mulx */
3460 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3461 break;
3462 #endif
3463 case 0xa: /* umul */
3464 CHECK_IU_FEATURE(dc, MUL);
3465 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3466 if (xop & 0x10) {
3467 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3468 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3469 dc->cc_op = CC_OP_LOGIC;
3471 break;
3472 case 0xb: /* smul */
3473 CHECK_IU_FEATURE(dc, MUL);
3474 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3475 if (xop & 0x10) {
3476 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3477 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3478 dc->cc_op = CC_OP_LOGIC;
3480 break;
3481 case 0xc: /* subx, V9 subc */
3482 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3483 (xop & 0x10));
3484 break;
3485 #ifdef TARGET_SPARC64
3486 case 0xd: /* V9 udivx */
3487 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3488 break;
3489 #endif
3490 case 0xe: /* udiv */
3491 CHECK_IU_FEATURE(dc, DIV);
3492 if (xop & 0x10) {
3493 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3494 cpu_src2);
3495 dc->cc_op = CC_OP_DIV;
3496 } else {
3497 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3498 cpu_src2);
3500 break;
3501 case 0xf: /* sdiv */
3502 CHECK_IU_FEATURE(dc, DIV);
3503 if (xop & 0x10) {
3504 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3505 cpu_src2);
3506 dc->cc_op = CC_OP_DIV;
3507 } else {
3508 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3509 cpu_src2);
3511 break;
3512 default:
3513 goto illegal_insn;
3515 gen_store_gpr(dc, rd, cpu_dst);
3516 } else {
3517 cpu_src1 = get_src1(dc, insn);
3518 cpu_src2 = get_src2(dc, insn);
3519 switch (xop) {
3520 case 0x20: /* taddcc */
3521 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3522 gen_store_gpr(dc, rd, cpu_dst);
3523 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3524 dc->cc_op = CC_OP_TADD;
3525 break;
3526 case 0x21: /* tsubcc */
3527 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3528 gen_store_gpr(dc, rd, cpu_dst);
3529 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3530 dc->cc_op = CC_OP_TSUB;
3531 break;
3532 case 0x22: /* taddcctv */
3533 gen_helper_taddcctv(cpu_dst, cpu_env,
3534 cpu_src1, cpu_src2);
3535 gen_store_gpr(dc, rd, cpu_dst);
3536 dc->cc_op = CC_OP_TADDTV;
3537 break;
3538 case 0x23: /* tsubcctv */
3539 gen_helper_tsubcctv(cpu_dst, cpu_env,
3540 cpu_src1, cpu_src2);
3541 gen_store_gpr(dc, rd, cpu_dst);
3542 dc->cc_op = CC_OP_TSUBTV;
3543 break;
3544 case 0x24: /* mulscc */
3545 update_psr(dc);
3546 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3547 gen_store_gpr(dc, rd, cpu_dst);
3548 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3549 dc->cc_op = CC_OP_ADD;
3550 break;
3551 #ifndef TARGET_SPARC64
3552 case 0x25: /* sll */
3553 if (IS_IMM) { /* immediate */
3554 simm = GET_FIELDs(insn, 20, 31);
3555 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3556 } else { /* register */
3557 cpu_tmp0 = get_temp_tl(dc);
3558 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3559 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3561 gen_store_gpr(dc, rd, cpu_dst);
3562 break;
3563 case 0x26: /* srl */
3564 if (IS_IMM) { /* immediate */
3565 simm = GET_FIELDs(insn, 20, 31);
3566 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3567 } else { /* register */
3568 cpu_tmp0 = get_temp_tl(dc);
3569 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3570 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3572 gen_store_gpr(dc, rd, cpu_dst);
3573 break;
3574 case 0x27: /* sra */
3575 if (IS_IMM) { /* immediate */
3576 simm = GET_FIELDs(insn, 20, 31);
3577 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3578 } else { /* register */
3579 cpu_tmp0 = get_temp_tl(dc);
3580 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3581 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3583 gen_store_gpr(dc, rd, cpu_dst);
3584 break;
3585 #endif
3586 case 0x30: /* wry, V9 wrasr */
3587 {
3588 cpu_tmp0 = get_temp_tl(dc);
3589 switch (rd) {
3590 case 0: /* wry */
3591 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3592 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3593 break;
3594 #ifndef TARGET_SPARC64
3595 case 0x01 ... 0x0f: /* undefined in the
3596 SPARCv8 manual, nop
3597 on the microSPARC
3598 II */
3599 case 0x10 ... 0x1f: /* implementation-dependent
3600 in the SPARCv8
3601 manual, nop on the
3602 microSPARC II */
3603 if ((rd == 0x13) && (dc->def->features &
3604 CPU_FEATURE_POWERDOWN)) {
3605 /* LEON3 power-down */
3606 save_state(dc);
3607 gen_helper_power_down(cpu_env);
3609 break;
3610 #else
3611 case 0x2: /* V9 wrccr */
3612 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3613 gen_helper_wrccr(cpu_env, cpu_tmp0);
3614 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3615 dc->cc_op = CC_OP_FLAGS;
3616 break;
3617 case 0x3: /* V9 wrasi */
3618 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3619 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3620 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3621 offsetof(CPUSPARCState, asi));
3622 /* End TB to notice changed ASI. */
3623 save_state(dc);
3624 gen_op_next_insn();
3625 tcg_gen_exit_tb(0);
3626 dc->is_br = 1;
3627 break;
3628 case 0x6: /* V9 wrfprs */
3629 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3630 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3631 save_state(dc);
3632 gen_op_next_insn();
3633 tcg_gen_exit_tb(0);
3634 dc->is_br = 1;
3635 break;
3636 case 0xf: /* V9 sir, nop if user */
3637 #if !defined(CONFIG_USER_ONLY)
3638 if (supervisor(dc)) {
3639 ; // XXX
3640 }
3641 #endif
3642 break;
3643 case 0x13: /* Graphics Status */
3644 if (gen_trap_ifnofpu(dc)) {
3645 goto jmp_insn;
3647 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3648 break;
3649 case 0x14: /* Softint set */
3650 if (!supervisor(dc))
3651 goto illegal_insn;
3652 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3653 gen_helper_set_softint(cpu_env, cpu_tmp0);
3654 break;
3655 case 0x15: /* Softint clear */
3656 if (!supervisor(dc))
3657 goto illegal_insn;
3658 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3659 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3660 break;
3661 case 0x16: /* Softint write */
3662 if (!supervisor(dc))
3663 goto illegal_insn;
3664 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3665 gen_helper_write_softint(cpu_env, cpu_tmp0);
3666 break;
3667 case 0x17: /* Tick compare */
3668 #if !defined(CONFIG_USER_ONLY)
3669 if (!supervisor(dc))
3670 goto illegal_insn;
3671 #endif
3673 TCGv_ptr r_tickptr;
3675 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3676 cpu_src2);
3677 r_tickptr = tcg_temp_new_ptr();
3678 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3679 offsetof(CPUSPARCState, tick));
3680 gen_helper_tick_set_limit(r_tickptr,
3681 cpu_tick_cmpr);
3682 tcg_temp_free_ptr(r_tickptr);
3684 break;
3685 case 0x18: /* System tick */
3686 #if !defined(CONFIG_USER_ONLY)
3687 if (!supervisor(dc))
3688 goto illegal_insn;
3689 #endif
3691 TCGv_ptr r_tickptr;
3693 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3694 cpu_src2);
3695 r_tickptr = tcg_temp_new_ptr();
3696 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3697 offsetof(CPUSPARCState, stick));
3698 gen_helper_tick_set_count(r_tickptr,
3699 cpu_tmp0);
3700 tcg_temp_free_ptr(r_tickptr);
3702 break;
3703 case 0x19: /* System tick compare */
3704 #if !defined(CONFIG_USER_ONLY)
3705 if (!supervisor(dc))
3706 goto illegal_insn;
3707 #endif
3709 TCGv_ptr r_tickptr;
3711 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3712 cpu_src2);
3713 r_tickptr = tcg_temp_new_ptr();
3714 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3715 offsetof(CPUSPARCState, stick));
3716 gen_helper_tick_set_limit(r_tickptr,
3717 cpu_stick_cmpr);
3718 tcg_temp_free_ptr(r_tickptr);
3720 break;
3722 case 0x10: /* Performance Control */
3723 case 0x11: /* Performance Instrumentation
3724 Counter */
3725 case 0x12: /* Dispatch Control */
3726 #endif
3727 default:
3728 goto illegal_insn;
3731 break;
3732 #if !defined(CONFIG_USER_ONLY)
3733 case 0x31: /* wrpsr, V9 saved, restored */
3735 if (!supervisor(dc))
3736 goto priv_insn;
3737 #ifdef TARGET_SPARC64
3738 switch (rd) {
3739 case 0:
3740 gen_helper_saved(cpu_env);
3741 break;
3742 case 1:
3743 gen_helper_restored(cpu_env);
3744 break;
3745 case 2: /* UA2005 allclean */
3746 case 3: /* UA2005 otherw */
3747 case 4: /* UA2005 normalw */
3748 case 5: /* UA2005 invalw */
3749 // XXX
3750 default:
3751 goto illegal_insn;
3753 #else
3754 cpu_tmp0 = get_temp_tl(dc);
3755 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3756 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3757 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3758 dc->cc_op = CC_OP_FLAGS;
3759 save_state(dc);
3760 gen_op_next_insn();
3761 tcg_gen_exit_tb(0);
3762 dc->is_br = 1;
3763 #endif
3765 break;
3766 case 0x32: /* wrwim, V9 wrpr */
3768 if (!supervisor(dc))
3769 goto priv_insn;
3770 cpu_tmp0 = get_temp_tl(dc);
3771 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3772 #ifdef TARGET_SPARC64
3773 switch (rd) {
3774 case 0: // tpc
3776 TCGv_ptr r_tsptr;
3778 r_tsptr = tcg_temp_new_ptr();
3779 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3780 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3781 offsetof(trap_state, tpc));
3782 tcg_temp_free_ptr(r_tsptr);
3784 break;
3785 case 1: // tnpc
3787 TCGv_ptr r_tsptr;
3789 r_tsptr = tcg_temp_new_ptr();
3790 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3791 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3792 offsetof(trap_state, tnpc));
3793 tcg_temp_free_ptr(r_tsptr);
3795 break;
3796 case 2: // tstate
3798 TCGv_ptr r_tsptr;
3800 r_tsptr = tcg_temp_new_ptr();
3801 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3802 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3803 offsetof(trap_state,
3804 tstate));
3805 tcg_temp_free_ptr(r_tsptr);
3807 break;
3808 case 3: // tt
3810 TCGv_ptr r_tsptr;
3812 r_tsptr = tcg_temp_new_ptr();
3813 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3814 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3815 offsetof(trap_state, tt));
3816 tcg_temp_free_ptr(r_tsptr);
3818 break;
3819 case 4: // tick
3821 TCGv_ptr r_tickptr;
3823 r_tickptr = tcg_temp_new_ptr();
3824 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3825 offsetof(CPUSPARCState, tick));
3826 gen_helper_tick_set_count(r_tickptr,
3827 cpu_tmp0);
3828 tcg_temp_free_ptr(r_tickptr);
3830 break;
3831 case 5: // tba
3832 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3833 break;
3834 case 6: // pstate
3835 save_state(dc);
3836 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3837 dc->npc = DYNAMIC_PC;
3838 break;
3839 case 7: // tl
3840 save_state(dc);
3841 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3842 offsetof(CPUSPARCState, tl));
3843 dc->npc = DYNAMIC_PC;
3844 break;
3845 case 8: // pil
3846 gen_helper_wrpil(cpu_env, cpu_tmp0);
3847 break;
3848 case 9: // cwp
3849 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3850 break;
3851 case 10: // cansave
3852 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3853 offsetof(CPUSPARCState,
3854 cansave));
3855 break;
3856 case 11: // canrestore
3857 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3858 offsetof(CPUSPARCState,
3859 canrestore));
3860 break;
3861 case 12: // cleanwin
3862 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3863 offsetof(CPUSPARCState,
3864 cleanwin));
3865 break;
3866 case 13: // otherwin
3867 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3868 offsetof(CPUSPARCState,
3869 otherwin));
3870 break;
3871 case 14: // wstate
3872 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3873 offsetof(CPUSPARCState,
3874 wstate));
3875 break;
3876 case 16: // UA2005 gl
3877 CHECK_IU_FEATURE(dc, GL);
3878 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3879 offsetof(CPUSPARCState, gl));
3880 break;
3881 case 26: // UA2005 strand status
3882 CHECK_IU_FEATURE(dc, HYPV);
3883 if (!hypervisor(dc))
3884 goto priv_insn;
3885 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3886 break;
3887 default:
3888 goto illegal_insn;
3890 #else
3891 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3892 if (dc->def->nwindows != 32) {
3893 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3894 (1 << dc->def->nwindows) - 1);
3896 #endif
3898 break;
3899 case 0x33: /* wrtbr, UA2005 wrhpr */
3901 #ifndef TARGET_SPARC64
3902 if (!supervisor(dc))
3903 goto priv_insn;
3904 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3905 #else
3906 CHECK_IU_FEATURE(dc, HYPV);
3907 if (!hypervisor(dc))
3908 goto priv_insn;
3909 cpu_tmp0 = get_temp_tl(dc);
3910 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3911 switch (rd) {
3912 case 0: // hpstate
3913 // XXX gen_op_wrhpstate();
3914 save_state(dc);
3915 gen_op_next_insn();
3916 tcg_gen_exit_tb(0);
3917 dc->is_br = 1;
3918 break;
3919 case 1: // htstate
3920 // XXX gen_op_wrhtstate();
3921 break;
3922 case 3: // hintp
3923 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3924 break;
3925 case 5: // htba
3926 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3927 break;
3928 case 31: // hstick_cmpr
3930 TCGv_ptr r_tickptr;
3932 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3933 r_tickptr = tcg_temp_new_ptr();
3934 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3935 offsetof(CPUSPARCState, hstick));
3936 gen_helper_tick_set_limit(r_tickptr,
3937 cpu_hstick_cmpr);
3938 tcg_temp_free_ptr(r_tickptr);
3940 break;
3941 case 6: // hver readonly
3942 default:
3943 goto illegal_insn;
3945 #endif
3947 break;
3948 #endif
3949 #ifdef TARGET_SPARC64
3950 case 0x2c: /* V9 movcc */
3952 int cc = GET_FIELD_SP(insn, 11, 12);
3953 int cond = GET_FIELD_SP(insn, 14, 17);
3954 DisasCompare cmp;
3955 TCGv dst;
3957 if (insn & (1 << 18)) {
3958 if (cc == 0) {
3959 gen_compare(&cmp, 0, cond, dc);
3960 } else if (cc == 2) {
3961 gen_compare(&cmp, 1, cond, dc);
3962 } else {
3963 goto illegal_insn;
3965 } else {
3966 gen_fcompare(&cmp, cc, cond);
3969 /* The get_src2 above loaded the normal 13-bit
3970 immediate field, not the 11-bit field we have
3971 in movcc. But it did handle the reg case. */
3972 if (IS_IMM) {
3973 simm = GET_FIELD_SPs(insn, 0, 10);
3974 tcg_gen_movi_tl(cpu_src2, simm);
3977 dst = gen_load_gpr(dc, rd);
3978 tcg_gen_movcond_tl(cmp.cond, dst,
3979 cmp.c1, cmp.c2,
3980 cpu_src2, dst);
3981 free_compare(&cmp);
3982 gen_store_gpr(dc, rd, dst);
3983 break;
3985 case 0x2d: /* V9 sdivx */
3986 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3987 gen_store_gpr(dc, rd, cpu_dst);
3988 break;
3989 case 0x2e: /* V9 popc */
3990 gen_helper_popc(cpu_dst, cpu_src2);
3991 gen_store_gpr(dc, rd, cpu_dst);
3992 break;
3993 case 0x2f: /* V9 movr */
3995 int cond = GET_FIELD_SP(insn, 10, 12);
3996 DisasCompare cmp;
3997 TCGv dst;
3999 gen_compare_reg(&cmp, cond, cpu_src1);
4001 /* The get_src2 above loaded the normal 13-bit
4002 immediate field, not the 10-bit field we have
4003 in movr. But it did handle the reg case. */
4004 if (IS_IMM) {
4005 simm = GET_FIELD_SPs(insn, 0, 9);
4006 tcg_gen_movi_tl(cpu_src2, simm);
4009 dst = gen_load_gpr(dc, rd);
4010 tcg_gen_movcond_tl(cmp.cond, dst,
4011 cmp.c1, cmp.c2,
4012 cpu_src2, dst);
4013 free_compare(&cmp);
4014 gen_store_gpr(dc, rd, dst);
4015 break;
4017 #endif
4018 default:
4019 goto illegal_insn;
4022 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4023 #ifdef TARGET_SPARC64
4024 int opf = GET_FIELD_SP(insn, 5, 13);
4025 rs1 = GET_FIELD(insn, 13, 17);
4026 rs2 = GET_FIELD(insn, 27, 31);
4027 if (gen_trap_ifnofpu(dc)) {
4028 goto jmp_insn;
4031 switch (opf) {
4032 case 0x000: /* VIS I edge8cc */
4033 CHECK_FPU_FEATURE(dc, VIS1);
4034 cpu_src1 = gen_load_gpr(dc, rs1);
4035 cpu_src2 = gen_load_gpr(dc, rs2);
4036 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4037 gen_store_gpr(dc, rd, cpu_dst);
4038 break;
4039 case 0x001: /* VIS II edge8n */
4040 CHECK_FPU_FEATURE(dc, VIS2);
4041 cpu_src1 = gen_load_gpr(dc, rs1);
4042 cpu_src2 = gen_load_gpr(dc, rs2);
4043 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4044 gen_store_gpr(dc, rd, cpu_dst);
4045 break;
4046 case 0x002: /* VIS I edge8lcc */
4047 CHECK_FPU_FEATURE(dc, VIS1);
4048 cpu_src1 = gen_load_gpr(dc, rs1);
4049 cpu_src2 = gen_load_gpr(dc, rs2);
4050 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4051 gen_store_gpr(dc, rd, cpu_dst);
4052 break;
4053 case 0x003: /* VIS II edge8ln */
4054 CHECK_FPU_FEATURE(dc, VIS2);
4055 cpu_src1 = gen_load_gpr(dc, rs1);
4056 cpu_src2 = gen_load_gpr(dc, rs2);
4057 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4058 gen_store_gpr(dc, rd, cpu_dst);
4059 break;
4060 case 0x004: /* VIS I edge16cc */
4061 CHECK_FPU_FEATURE(dc, VIS1);
4062 cpu_src1 = gen_load_gpr(dc, rs1);
4063 cpu_src2 = gen_load_gpr(dc, rs2);
4064 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4065 gen_store_gpr(dc, rd, cpu_dst);
4066 break;
4067 case 0x005: /* VIS II edge16n */
4068 CHECK_FPU_FEATURE(dc, VIS2);
4069 cpu_src1 = gen_load_gpr(dc, rs1);
4070 cpu_src2 = gen_load_gpr(dc, rs2);
4071 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4072 gen_store_gpr(dc, rd, cpu_dst);
4073 break;
4074 case 0x006: /* VIS I edge16lcc */
4075 CHECK_FPU_FEATURE(dc, VIS1);
4076 cpu_src1 = gen_load_gpr(dc, rs1);
4077 cpu_src2 = gen_load_gpr(dc, rs2);
4078 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4079 gen_store_gpr(dc, rd, cpu_dst);
4080 break;
4081 case 0x007: /* VIS II edge16ln */
4082 CHECK_FPU_FEATURE(dc, VIS2);
4083 cpu_src1 = gen_load_gpr(dc, rs1);
4084 cpu_src2 = gen_load_gpr(dc, rs2);
4085 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4086 gen_store_gpr(dc, rd, cpu_dst);
4087 break;
4088 case 0x008: /* VIS I edge32cc */
4089 CHECK_FPU_FEATURE(dc, VIS1);
4090 cpu_src1 = gen_load_gpr(dc, rs1);
4091 cpu_src2 = gen_load_gpr(dc, rs2);
4092 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4093 gen_store_gpr(dc, rd, cpu_dst);
4094 break;
4095 case 0x009: /* VIS II edge32n */
4096 CHECK_FPU_FEATURE(dc, VIS2);
4097 cpu_src1 = gen_load_gpr(dc, rs1);
4098 cpu_src2 = gen_load_gpr(dc, rs2);
4099 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4100 gen_store_gpr(dc, rd, cpu_dst);
4101 break;
4102 case 0x00a: /* VIS I edge32lcc */
4103 CHECK_FPU_FEATURE(dc, VIS1);
4104 cpu_src1 = gen_load_gpr(dc, rs1);
4105 cpu_src2 = gen_load_gpr(dc, rs2);
4106 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4107 gen_store_gpr(dc, rd, cpu_dst);
4108 break;
4109 case 0x00b: /* VIS II edge32ln */
4110 CHECK_FPU_FEATURE(dc, VIS2);
4111 cpu_src1 = gen_load_gpr(dc, rs1);
4112 cpu_src2 = gen_load_gpr(dc, rs2);
4113 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4114 gen_store_gpr(dc, rd, cpu_dst);
4115 break;
4116 case 0x010: /* VIS I array8 */
4117 CHECK_FPU_FEATURE(dc, VIS1);
4118 cpu_src1 = gen_load_gpr(dc, rs1);
4119 cpu_src2 = gen_load_gpr(dc, rs2);
4120 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4121 gen_store_gpr(dc, rd, cpu_dst);
4122 break;
4123 case 0x012: /* VIS I array16 */
4124 CHECK_FPU_FEATURE(dc, VIS1);
4125 cpu_src1 = gen_load_gpr(dc, rs1);
4126 cpu_src2 = gen_load_gpr(dc, rs2);
4127 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4128 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4129 gen_store_gpr(dc, rd, cpu_dst);
4130 break;
4131 case 0x014: /* VIS I array32 */
4132 CHECK_FPU_FEATURE(dc, VIS1);
4133 cpu_src1 = gen_load_gpr(dc, rs1);
4134 cpu_src2 = gen_load_gpr(dc, rs2);
4135 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4136 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4137 gen_store_gpr(dc, rd, cpu_dst);
4138 break;
4139 case 0x018: /* VIS I alignaddr */
4140 CHECK_FPU_FEATURE(dc, VIS1);
4141 cpu_src1 = gen_load_gpr(dc, rs1);
4142 cpu_src2 = gen_load_gpr(dc, rs2);
4143 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4144 gen_store_gpr(dc, rd, cpu_dst);
4145 break;
4146 case 0x01a: /* VIS I alignaddrl */
4147 CHECK_FPU_FEATURE(dc, VIS1);
4148 cpu_src1 = gen_load_gpr(dc, rs1);
4149 cpu_src2 = gen_load_gpr(dc, rs2);
4150 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4151 gen_store_gpr(dc, rd, cpu_dst);
4152 break;
4153 case 0x019: /* VIS II bmask */
4154 CHECK_FPU_FEATURE(dc, VIS2);
4155 cpu_src1 = gen_load_gpr(dc, rs1);
4156 cpu_src2 = gen_load_gpr(dc, rs2);
4157 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4158 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4159 gen_store_gpr(dc, rd, cpu_dst);
4160 break;
4161 case 0x020: /* VIS I fcmple16 */
4162 CHECK_FPU_FEATURE(dc, VIS1);
4163 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4164 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4165 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4166 gen_store_gpr(dc, rd, cpu_dst);
4167 break;
4168 case 0x022: /* VIS I fcmpne16 */
4169 CHECK_FPU_FEATURE(dc, VIS1);
4170 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4171 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4172 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4173 gen_store_gpr(dc, rd, cpu_dst);
4174 break;
4175 case 0x024: /* VIS I fcmple32 */
4176 CHECK_FPU_FEATURE(dc, VIS1);
4177 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4178 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4179 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4180 gen_store_gpr(dc, rd, cpu_dst);
4181 break;
4182 case 0x026: /* VIS I fcmpne32 */
4183 CHECK_FPU_FEATURE(dc, VIS1);
4184 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4185 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4186 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4187 gen_store_gpr(dc, rd, cpu_dst);
4188 break;
4189 case 0x028: /* VIS I fcmpgt16 */
4190 CHECK_FPU_FEATURE(dc, VIS1);
4191 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4192 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4193 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4194 gen_store_gpr(dc, rd, cpu_dst);
4195 break;
4196 case 0x02a: /* VIS I fcmpeq16 */
4197 CHECK_FPU_FEATURE(dc, VIS1);
4198 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4199 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4200 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4201 gen_store_gpr(dc, rd, cpu_dst);
4202 break;
4203 case 0x02c: /* VIS I fcmpgt32 */
4204 CHECK_FPU_FEATURE(dc, VIS1);
4205 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4206 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4207 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4208 gen_store_gpr(dc, rd, cpu_dst);
4209 break;
4210 case 0x02e: /* VIS I fcmpeq32 */
4211 CHECK_FPU_FEATURE(dc, VIS1);
4212 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4213 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4214 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4215 gen_store_gpr(dc, rd, cpu_dst);
4216 break;
4217 case 0x031: /* VIS I fmul8x16 */
4218 CHECK_FPU_FEATURE(dc, VIS1);
4219 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4220 break;
4221 case 0x033: /* VIS I fmul8x16au */
4222 CHECK_FPU_FEATURE(dc, VIS1);
4223 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4224 break;
4225 case 0x035: /* VIS I fmul8x16al */
4226 CHECK_FPU_FEATURE(dc, VIS1);
4227 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4228 break;
4229 case 0x036: /* VIS I fmul8sux16 */
4230 CHECK_FPU_FEATURE(dc, VIS1);
4231 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4232 break;
4233 case 0x037: /* VIS I fmul8ulx16 */
4234 CHECK_FPU_FEATURE(dc, VIS1);
4235 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4236 break;
4237 case 0x038: /* VIS I fmuld8sux16 */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4240 break;
4241 case 0x039: /* VIS I fmuld8ulx16 */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4244 break;
4245 case 0x03a: /* VIS I fpack32 */
4246 CHECK_FPU_FEATURE(dc, VIS1);
4247 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4248 break;
4249 case 0x03b: /* VIS I fpack16 */
4250 CHECK_FPU_FEATURE(dc, VIS1);
4251 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4252 cpu_dst_32 = gen_dest_fpr_F(dc);
4253 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4254 gen_store_fpr_F(dc, rd, cpu_dst_32);
4255 break;
4256 case 0x03d: /* VIS I fpackfix */
4257 CHECK_FPU_FEATURE(dc, VIS1);
4258 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4259 cpu_dst_32 = gen_dest_fpr_F(dc);
4260 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4261 gen_store_fpr_F(dc, rd, cpu_dst_32);
4262 break;
4263 case 0x03e: /* VIS I pdist */
4264 CHECK_FPU_FEATURE(dc, VIS1);
4265 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4266 break;
4267 case 0x048: /* VIS I faligndata */
4268 CHECK_FPU_FEATURE(dc, VIS1);
4269 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4270 break;
4271 case 0x04b: /* VIS I fpmerge */
4272 CHECK_FPU_FEATURE(dc, VIS1);
4273 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4274 break;
4275 case 0x04c: /* VIS II bshuffle */
4276 CHECK_FPU_FEATURE(dc, VIS2);
4277 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4278 break;
4279 case 0x04d: /* VIS I fexpand */
4280 CHECK_FPU_FEATURE(dc, VIS1);
4281 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4282 break;
4283 case 0x050: /* VIS I fpadd16 */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4286 break;
4287 case 0x051: /* VIS I fpadd16s */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4290 break;
4291 case 0x052: /* VIS I fpadd32 */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4294 break;
4295 case 0x053: /* VIS I fpadd32s */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4298 break;
4299 case 0x054: /* VIS I fpsub16 */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4302 break;
4303 case 0x055: /* VIS I fpsub16s */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4306 break;
4307 case 0x056: /* VIS I fpsub32 */
4308 CHECK_FPU_FEATURE(dc, VIS1);
4309 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4310 break;
4311 case 0x057: /* VIS I fpsub32s */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4314 break;
4315 case 0x060: /* VIS I fzero */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4318 tcg_gen_movi_i64(cpu_dst_64, 0);
4319 gen_store_fpr_D(dc, rd, cpu_dst_64);
4320 break;
4321 case 0x061: /* VIS I fzeros */
4322 CHECK_FPU_FEATURE(dc, VIS1);
4323 cpu_dst_32 = gen_dest_fpr_F(dc);
4324 tcg_gen_movi_i32(cpu_dst_32, 0);
4325 gen_store_fpr_F(dc, rd, cpu_dst_32);
4326 break;
4327 case 0x062: /* VIS I fnor */
4328 CHECK_FPU_FEATURE(dc, VIS1);
4329 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4330 break;
4331 case 0x063: /* VIS I fnors */
4332 CHECK_FPU_FEATURE(dc, VIS1);
4333 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4334 break;
4335 case 0x064: /* VIS I fandnot2 */
4336 CHECK_FPU_FEATURE(dc, VIS1);
4337 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4338 break;
4339 case 0x065: /* VIS I fandnot2s */
4340 CHECK_FPU_FEATURE(dc, VIS1);
4341 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4342 break;
4343 case 0x066: /* VIS I fnot2 */
4344 CHECK_FPU_FEATURE(dc, VIS1);
4345 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4346 break;
4347 case 0x067: /* VIS I fnot2s */
4348 CHECK_FPU_FEATURE(dc, VIS1);
4349 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4350 break;
4351 case 0x068: /* VIS I fandnot1 */
4352 CHECK_FPU_FEATURE(dc, VIS1);
4353 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4354 break;
4355 case 0x069: /* VIS I fandnot1s */
4356 CHECK_FPU_FEATURE(dc, VIS1);
4357 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4358 break;
4359 case 0x06a: /* VIS I fnot1 */
4360 CHECK_FPU_FEATURE(dc, VIS1);
4361 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4362 break;
4363 case 0x06b: /* VIS I fnot1s */
4364 CHECK_FPU_FEATURE(dc, VIS1);
4365 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4366 break;
4367 case 0x06c: /* VIS I fxor */
4368 CHECK_FPU_FEATURE(dc, VIS1);
4369 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4370 break;
4371 case 0x06d: /* VIS I fxors */
4372 CHECK_FPU_FEATURE(dc, VIS1);
4373 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4374 break;
4375 case 0x06e: /* VIS I fnand */
4376 CHECK_FPU_FEATURE(dc, VIS1);
4377 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4378 break;
4379 case 0x06f: /* VIS I fnands */
4380 CHECK_FPU_FEATURE(dc, VIS1);
4381 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4382 break;
4383 case 0x070: /* VIS I fand */
4384 CHECK_FPU_FEATURE(dc, VIS1);
4385 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4386 break;
4387 case 0x071: /* VIS I fands */
4388 CHECK_FPU_FEATURE(dc, VIS1);
4389 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4390 break;
4391 case 0x072: /* VIS I fxnor */
4392 CHECK_FPU_FEATURE(dc, VIS1);
4393 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4394 break;
4395 case 0x073: /* VIS I fxnors */
4396 CHECK_FPU_FEATURE(dc, VIS1);
4397 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4398 break;
4399 case 0x074: /* VIS I fsrc1 */
4400 CHECK_FPU_FEATURE(dc, VIS1);
4401 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4402 gen_store_fpr_D(dc, rd, cpu_src1_64);
4403 break;
4404 case 0x075: /* VIS I fsrc1s */
4405 CHECK_FPU_FEATURE(dc, VIS1);
4406 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4407 gen_store_fpr_F(dc, rd, cpu_src1_32);
4408 break;
4409 case 0x076: /* VIS I fornot2 */
4410 CHECK_FPU_FEATURE(dc, VIS1);
4411 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4412 break;
4413 case 0x077: /* VIS I fornot2s */
4414 CHECK_FPU_FEATURE(dc, VIS1);
4415 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4416 break;
4417 case 0x078: /* VIS I fsrc2 */
4418 CHECK_FPU_FEATURE(dc, VIS1);
4419 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4420 gen_store_fpr_D(dc, rd, cpu_src1_64);
4421 break;
4422 case 0x079: /* VIS I fsrc2s */
4423 CHECK_FPU_FEATURE(dc, VIS1);
4424 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4425 gen_store_fpr_F(dc, rd, cpu_src1_32);
4426 break;
4427 case 0x07a: /* VIS I fornot1 */
4428 CHECK_FPU_FEATURE(dc, VIS1);
4429 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4430 break;
4431 case 0x07b: /* VIS I fornot1s */
4432 CHECK_FPU_FEATURE(dc, VIS1);
4433 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4434 break;
4435 case 0x07c: /* VIS I for */
4436 CHECK_FPU_FEATURE(dc, VIS1);
4437 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4438 break;
4439 case 0x07d: /* VIS I fors */
4440 CHECK_FPU_FEATURE(dc, VIS1);
4441 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4442 break;
4443 case 0x07e: /* VIS I fone */
4444 CHECK_FPU_FEATURE(dc, VIS1);
4445 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4446 tcg_gen_movi_i64(cpu_dst_64, -1);
4447 gen_store_fpr_D(dc, rd, cpu_dst_64);
4448 break;
4449 case 0x07f: /* VIS I fones */
4450 CHECK_FPU_FEATURE(dc, VIS1);
4451 cpu_dst_32 = gen_dest_fpr_F(dc);
4452 tcg_gen_movi_i32(cpu_dst_32, -1);
4453 gen_store_fpr_F(dc, rd, cpu_dst_32);
4454 break;
4455 case 0x080: /* VIS I shutdown */
4456 case 0x081: /* VIS II siam */
4457 // XXX: not implemented
4458 goto illegal_insn;
4459 default:
4460 goto illegal_insn;
4461 }
4462 #else
4463 goto ncp_insn;
4464 #endif
4465 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4466 #ifdef TARGET_SPARC64
4467 goto illegal_insn;
4468 #else
4469 goto ncp_insn;
4470 #endif
4471 #ifdef TARGET_SPARC64
4472 } else if (xop == 0x39) { /* V9 return */
4473 TCGv_i32 r_const;
4475 save_state(dc);
4476 cpu_src1 = get_src1(dc, insn);
4477 cpu_tmp0 = get_temp_tl(dc);
4478 if (IS_IMM) { /* immediate */
4479 simm = GET_FIELDs(insn, 19, 31);
4480 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4481 } else { /* register */
4482 rs2 = GET_FIELD(insn, 27, 31);
4483 if (rs2) {
4484 cpu_src2 = gen_load_gpr(dc, rs2);
4485 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4486 } else {
4487 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4488 }
4489 }
4490 gen_helper_restore(cpu_env);
4491 gen_mov_pc_npc(dc);
4492 r_const = tcg_const_i32(3);
4493 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4494 tcg_temp_free_i32(r_const);
4495 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4496 dc->npc = DYNAMIC_PC;
4497 goto jmp_insn;
4498 #endif
4499 } else {
4500 cpu_src1 = get_src1(dc, insn);
4501 cpu_tmp0 = get_temp_tl(dc);
4502 if (IS_IMM) { /* immediate */
4503 simm = GET_FIELDs(insn, 19, 31);
4504 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4505 } else { /* register */
4506 rs2 = GET_FIELD(insn, 27, 31);
4507 if (rs2) {
4508 cpu_src2 = gen_load_gpr(dc, rs2);
4509 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4510 } else {
4511 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4512 }
4513 }
4514 switch (xop) {
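/* jmpl writes the address of the jmpl itself (dc->pc) into rd, checks
   the computed target for 4-byte alignment, and installs it as the new
   npc -- the delay-slot instruction at the old npc still executes. */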
4515 case 0x38: /* jmpl */
4516 {
4517 TCGv t;
4518 TCGv_i32 r_const;
4520 t = gen_dest_gpr(dc, rd);
4521 tcg_gen_movi_tl(t, dc->pc);
4522 gen_store_gpr(dc, rd, t);
4523 gen_mov_pc_npc(dc);
4524 r_const = tcg_const_i32(3);
4525 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4526 tcg_temp_free_i32(r_const);
4527 gen_address_mask(dc, cpu_tmp0);
4528 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4529 dc->npc = DYNAMIC_PC;
4530 }
4531 goto jmp_insn;
4532 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4533 case 0x39: /* rett, V9 return */
4534 {
4535 TCGv_i32 r_const;
4537 if (!supervisor(dc))
4538 goto priv_insn;
4539 gen_mov_pc_npc(dc);
4540 r_const = tcg_const_i32(3);
4541 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4542 tcg_temp_free_i32(r_const);
4543 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4544 dc->npc = DYNAMIC_PC;
4545 gen_helper_rett(cpu_env);
4546 }
4547 goto jmp_insn;
4548 #endif
4549 case 0x3b: /* flush */
4550 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4551 goto unimp_flush;
4552 /* nop */
4553 break;
4554 case 0x3c: /* save */
4555 save_state(dc);
4556 gen_helper_save(cpu_env);
4557 gen_store_gpr(dc, rd, cpu_tmp0);
4558 break;
4559 case 0x3d: /* restore */
4560 save_state(dc);
4561 gen_helper_restore(cpu_env);
4562 gen_store_gpr(dc, rd, cpu_tmp0);
4563 break;
4564 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4565 case 0x3e: /* V9 done/retry */
4566 {
4567 switch (rd) {
4568 case 0:
4569 if (!supervisor(dc))
4570 goto priv_insn;
4571 dc->npc = DYNAMIC_PC;
4572 dc->pc = DYNAMIC_PC;
4573 gen_helper_done(cpu_env);
4574 goto jmp_insn;
4575 case 1:
4576 if (!supervisor(dc))
4577 goto priv_insn;
4578 dc->npc = DYNAMIC_PC;
4579 dc->pc = DYNAMIC_PC;
4580 gen_helper_retry(cpu_env);
4581 goto jmp_insn;
4582 default:
4583 goto illegal_insn;
4584 }
4585 }
4586 break;
4587 #endif
4588 default:
4589 goto illegal_insn;
4590 }
4591 }
4592 break;
4593 }
4594 break;
4595 case 3: /* load/store instructions */
4596 {
4597 unsigned int xop = GET_FIELD(insn, 7, 12);
4598 /* ??? gen_address_mask prevents us from using a source
4599 register directly. Always generate a temporary. */
4600 TCGv cpu_addr = get_temp_tl(dc);
4602 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
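/* gen_address_mask truncates the effective address to 32 bits when
   dc->address_mask_32bit is set (the sparc64 PSTATE.AM address mask);
   since that truncation must not clobber a guest register, the address
   is accumulated in the cpu_addr temporary, per the note above. */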
4603 if (xop == 0x3c || xop == 0x3e) {
4604 /* V9 casa/casxa : no offset */
4605 } else if (IS_IMM) { /* immediate */
4606 simm = GET_FIELDs(insn, 19, 31);
4607 if (simm != 0) {
4608 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4609 }
4610 } else { /* register */
4611 rs2 = GET_FIELD(insn, 27, 31);
4612 if (rs2 != 0) {
4613 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4614 }
4615 }
4616 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4617 (xop > 0x17 && xop <= 0x1d ) ||
4618 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4619 TCGv cpu_val = gen_dest_gpr(dc, rd);
4621 switch (xop) {
4622 case 0x0: /* ld, V9 lduw, load unsigned word */
4623 gen_address_mask(dc, cpu_addr);
4624 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4625 break;
4626 case 0x1: /* ldub, load unsigned byte */
4627 gen_address_mask(dc, cpu_addr);
4628 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4629 break;
4630 case 0x2: /* lduh, load unsigned halfword */
4631 gen_address_mask(dc, cpu_addr);
4632 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4633 break;
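/* ldd below requires an even rd: the 64-bit value is fetched with a
   single load, the low 32 bits are written to rd + 1 here, and the
   high 32 bits fall through to the common gen_store_gpr(dc, rd, ...)
   at the end of the load switch. */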
4634 case 0x3: /* ldd, load double word */
4635 if (rd & 1)
4636 goto illegal_insn;
4637 else {
4638 TCGv_i32 r_const;
4639 TCGv_i64 t64;
4641 save_state(dc);
4642 r_const = tcg_const_i32(7);
4643 /* XXX remove alignment check */
4644 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4645 tcg_temp_free_i32(r_const);
4646 gen_address_mask(dc, cpu_addr);
4647 t64 = tcg_temp_new_i64();
4648 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4649 tcg_gen_trunc_i64_tl(cpu_val, t64);
4650 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4651 gen_store_gpr(dc, rd + 1, cpu_val);
4652 tcg_gen_shri_i64(t64, t64, 32);
4653 tcg_gen_trunc_i64_tl(cpu_val, t64);
4654 tcg_temp_free_i64(t64);
4655 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4656 }
4657 break;
4658 case 0x9: /* ldsb, load signed byte */
4659 gen_address_mask(dc, cpu_addr);
4660 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4661 break;
4662 case 0xa: /* ldsh, load signed halfword */
4663 gen_address_mask(dc, cpu_addr);
4664 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4665 break;
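/* ldstub is emulated as a separate byte load followed by a store of
   0xff, hence the XXX below: architecturally this is an atomic
   read-modify-write, which this translation does not guarantee. */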
4666 case 0xd: /* ldstub -- XXX: should be atomic */
4667 {
4668 TCGv r_const;
4669 TCGv tmp = tcg_temp_new();
4671 gen_address_mask(dc, cpu_addr);
4672 tcg_gen_qemu_ld8u(tmp, cpu_addr, dc->mem_idx);
4673 r_const = tcg_const_tl(0xff);
4674 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4675 tcg_gen_mov_tl(cpu_val, tmp);
4676 tcg_temp_free(r_const);
4677 tcg_temp_free(tmp);
4678 }
4679 break;
4680 case 0x0f:
4681 /* swap, swap register with memory. Also atomic */
4682 {
4683 TCGv t0 = get_temp_tl(dc);
4684 CHECK_IU_FEATURE(dc, SWAP);
4685 cpu_src1 = gen_load_gpr(dc, rd);
4686 gen_address_mask(dc, cpu_addr);
4687 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4688 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4689 tcg_gen_mov_tl(cpu_val, t0);
4690 }
4691 break;
4692 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
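/* In the gen_ld_asi calls below the trailing two arguments are the
   access size in bytes and a sign flag, matching the mnemonics:
   e.g. lduba -> (1, 0), ldsha -> (2, 1), lduwa -> (4, 0). */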
4693 case 0x10: /* lda, V9 lduwa, load word alternate */
4694 #ifndef TARGET_SPARC64
4695 if (IS_IMM)
4696 goto illegal_insn;
4697 if (!supervisor(dc))
4698 goto priv_insn;
4699 #endif
4700 save_state(dc);
4701 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 0);
4702 break;
4703 case 0x11: /* lduba, load unsigned byte alternate */
4704 #ifndef TARGET_SPARC64
4705 if (IS_IMM)
4706 goto illegal_insn;
4707 if (!supervisor(dc))
4708 goto priv_insn;
4709 #endif
4710 save_state(dc);
4711 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 0);
4712 break;
4713 case 0x12: /* lduha, load unsigned halfword alternate */
4714 #ifndef TARGET_SPARC64
4715 if (IS_IMM)
4716 goto illegal_insn;
4717 if (!supervisor(dc))
4718 goto priv_insn;
4719 #endif
4720 save_state(dc);
4721 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 0);
4722 break;
4723 case 0x13: /* ldda, load double word alternate */
4724 #ifndef TARGET_SPARC64
4725 if (IS_IMM)
4726 goto illegal_insn;
4727 if (!supervisor(dc))
4728 goto priv_insn;
4729 #endif
4730 if (rd & 1)
4731 goto illegal_insn;
4732 save_state(dc);
4733 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4734 goto skip_move;
4735 case 0x19: /* ldsba, load signed byte alternate */
4736 #ifndef TARGET_SPARC64
4737 if (IS_IMM)
4738 goto illegal_insn;
4739 if (!supervisor(dc))
4740 goto priv_insn;
4741 #endif
4742 save_state(dc);
4743 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 1);
4744 break;
4745 case 0x1a: /* ldsha, load signed halfword alternate */
4746 #ifndef TARGET_SPARC64
4747 if (IS_IMM)
4748 goto illegal_insn;
4749 if (!supervisor(dc))
4750 goto priv_insn;
4751 #endif
4752 save_state(dc);
4753 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 1);
4754 break;
4755 case 0x1d: /* ldstuba -- XXX: should be atomic */
4756 #ifndef TARGET_SPARC64
4757 if (IS_IMM)
4758 goto illegal_insn;
4759 if (!supervisor(dc))
4760 goto priv_insn;
4761 #endif
4762 save_state(dc);
4763 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
4764 break;
4765 case 0x1f: /* swapa, swap reg with alt. memory. Also
4766 atomic */
4767 CHECK_IU_FEATURE(dc, SWAP);
4768 #ifndef TARGET_SPARC64
4769 if (IS_IMM)
4770 goto illegal_insn;
4771 if (!supervisor(dc))
4772 goto priv_insn;
4773 #endif
4774 save_state(dc);
4775 cpu_src1 = gen_load_gpr(dc, rd);
4776 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
4777 break;
4779 #ifndef TARGET_SPARC64
4780 case 0x30: /* ldc */
4781 case 0x31: /* ldcsr */
4782 case 0x33: /* lddc */
4783 goto ncp_insn;
4784 #endif
4785 #endif
4786 #ifdef TARGET_SPARC64
4787 case 0x08: /* V9 ldsw */
4788 gen_address_mask(dc, cpu_addr);
4789 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4790 break;
4791 case 0x0b: /* V9 ldx */
4792 gen_address_mask(dc, cpu_addr);
4793 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4794 break;
4795 case 0x18: /* V9 ldswa */
4796 save_state(dc);
4797 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 1);
4798 break;
4799 case 0x1b: /* V9 ldxa */
4800 save_state(dc);
4801 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 8, 0);
4802 break;
4803 case 0x2d: /* V9 prefetch, no effect */
4804 goto skip_move;
4805 case 0x30: /* V9 ldfa */
4806 if (gen_trap_ifnofpu(dc)) {
4807 goto jmp_insn;
4808 }
4809 save_state(dc);
4810 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
4811 gen_update_fprs_dirty(rd);
4812 goto skip_move;
4813 case 0x33: /* V9 lddfa */
4814 if (gen_trap_ifnofpu(dc)) {
4815 goto jmp_insn;
4816 }
4817 save_state(dc);
4818 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
4819 gen_update_fprs_dirty(DFPREG(rd));
4820 goto skip_move;
4821 case 0x3d: /* V9 prefetcha, no effect */
4822 goto skip_move;
4823 case 0x32: /* V9 ldqfa */
4824 CHECK_FPU_FEATURE(dc, FLOAT128);
4825 if (gen_trap_ifnofpu(dc)) {
4826 goto jmp_insn;
4827 }
4828 save_state(dc);
4829 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
4830 gen_update_fprs_dirty(QFPREG(rd));
4831 goto skip_move;
4832 #endif
4833 default:
4834 goto illegal_insn;
4835 }
4836 gen_store_gpr(dc, rd, cpu_val);
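/* Cases such as ldda, the ASI FP loads and prefetch jump to skip_move
   below because they either store their results themselves or produce
   none, bypassing the common write-back of cpu_val just above. */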
4837 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4838 skip_move: ;
4839 #endif
4840 } else if (xop >= 0x20 && xop < 0x24) {
4841 TCGv t0;
4843 if (gen_trap_ifnofpu(dc)) {
4844 goto jmp_insn;
4845 }
4846 save_state(dc);
4847 switch (xop) {
4848 case 0x20: /* ldf, load fpreg */
4849 gen_address_mask(dc, cpu_addr);
4850 t0 = get_temp_tl(dc);
4851 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4852 cpu_dst_32 = gen_dest_fpr_F(dc);
4853 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4854 gen_store_fpr_F(dc, rd, cpu_dst_32);
4855 break;
4856 case 0x21: /* ldfsr, V9 ldxfsr */
4857 #ifdef TARGET_SPARC64
4858 gen_address_mask(dc, cpu_addr);
4859 if (rd == 1) {
4860 TCGv_i64 t64 = tcg_temp_new_i64();
4861 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4862 gen_helper_ldxfsr(cpu_env, t64);
4863 tcg_temp_free_i64(t64);
4864 break;
4865 }
4866 #endif
4867 cpu_dst_32 = get_temp_i32(dc);
4868 t0 = get_temp_tl(dc);
4869 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4870 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4871 gen_helper_ldfsr(cpu_env, cpu_dst_32);
4872 break;
4873 case 0x22: /* ldqf, load quad fpreg */
4874 {
4875 TCGv_i32 r_const;
4877 CHECK_FPU_FEATURE(dc, FLOAT128);
4878 r_const = tcg_const_i32(dc->mem_idx);
4879 gen_address_mask(dc, cpu_addr);
4880 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4881 tcg_temp_free_i32(r_const);
4882 gen_op_store_QT0_fpr(QFPREG(rd));
4883 gen_update_fprs_dirty(QFPREG(rd));
4884 }
4885 break;
4886 case 0x23: /* lddf, load double fpreg */
4887 gen_address_mask(dc, cpu_addr);
4888 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4889 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4890 gen_store_fpr_D(dc, rd, cpu_dst_64);
4891 break;
4892 default:
4893 goto illegal_insn;
4894 }
4895 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4896 xop == 0xe || xop == 0x1e) {
4897 TCGv cpu_val = gen_load_gpr(dc, rd);
4899 switch (xop) {
4900 case 0x4: /* st, store word */
4901 gen_address_mask(dc, cpu_addr);
4902 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4903 break;
4904 case 0x5: /* stb, store byte */
4905 gen_address_mask(dc, cpu_addr);
4906 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4907 break;
4908 case 0x6: /* sth, store halfword */
4909 gen_address_mask(dc, cpu_addr);
4910 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4911 break;
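/* For std below, rd (even) supplies the most significant word and
   rd + 1 the least significant one; tcg_gen_concat_tl_i64(t64, lo, hi)
   packs 'lo' into the low half so a single 64-bit store suffices. */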
4912 case 0x7: /* std, store double word */
4913 if (rd & 1)
4914 goto illegal_insn;
4915 else {
4916 TCGv_i32 r_const;
4917 TCGv_i64 t64;
4918 TCGv lo;
4920 save_state(dc);
4921 gen_address_mask(dc, cpu_addr);
4922 r_const = tcg_const_i32(7);
4923 /* XXX remove alignment check */
4924 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4925 tcg_temp_free_i32(r_const);
4926 lo = gen_load_gpr(dc, rd + 1);
4928 t64 = tcg_temp_new_i64();
4929 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4930 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4931 tcg_temp_free_i64(t64);
4932 }
4933 break;
4934 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4935 case 0x14: /* sta, V9 stwa, store word alternate */
4936 #ifndef TARGET_SPARC64
4937 if (IS_IMM)
4938 goto illegal_insn;
4939 if (!supervisor(dc))
4940 goto priv_insn;
4941 #endif
4942 save_state(dc);
4943 gen_st_asi(dc, cpu_val, cpu_addr, insn, 4);
4944 dc->npc = DYNAMIC_PC;
4945 break;
4946 case 0x15: /* stba, store byte alternate */
4947 #ifndef TARGET_SPARC64
4948 if (IS_IMM)
4949 goto illegal_insn;
4950 if (!supervisor(dc))
4951 goto priv_insn;
4952 #endif
4953 save_state(dc);
4954 gen_st_asi(dc, cpu_val, cpu_addr, insn, 1);
4955 dc->npc = DYNAMIC_PC;
4956 break;
4957 case 0x16: /* stha, store halfword alternate */
4958 #ifndef TARGET_SPARC64
4959 if (IS_IMM)
4960 goto illegal_insn;
4961 if (!supervisor(dc))
4962 goto priv_insn;
4963 #endif
4964 save_state(dc);
4965 gen_st_asi(dc, cpu_val, cpu_addr, insn, 2);
4966 dc->npc = DYNAMIC_PC;
4967 break;
4968 case 0x17: /* stda, store double word alternate */
4969 #ifndef TARGET_SPARC64
4970 if (IS_IMM)
4971 goto illegal_insn;
4972 if (!supervisor(dc))
4973 goto priv_insn;
4974 #endif
4975 if (rd & 1)
4976 goto illegal_insn;
4977 else {
4978 save_state(dc);
4979 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4980 }
4981 break;
4982 #endif
4983 #ifdef TARGET_SPARC64
4984 case 0x0e: /* V9 stx */
4985 gen_address_mask(dc, cpu_addr);
4986 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4987 break;
4988 case 0x1e: /* V9 stxa */
4989 save_state(dc);
4990 gen_st_asi(dc, cpu_val, cpu_addr, insn, 8);
4991 dc->npc = DYNAMIC_PC;
4992 break;
4993 #endif
4994 default:
4995 goto illegal_insn;
4996 }
4997 } else if (xop > 0x23 && xop < 0x28) {
4998 if (gen_trap_ifnofpu(dc)) {
4999 goto jmp_insn;
5000 }
5001 save_state(dc);
5002 switch (xop) {
5003 case 0x24: /* stf, store fpreg */
5004 {
5005 TCGv t = get_temp_tl(dc);
5006 gen_address_mask(dc, cpu_addr);
5007 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5008 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5009 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5010 }
5011 break;
5012 case 0x25: /* stfsr, V9 stxfsr */
5013 {
5014 TCGv t = get_temp_tl(dc);
5016 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5017 #ifdef TARGET_SPARC64
5018 gen_address_mask(dc, cpu_addr);
5019 if (rd == 1) {
5020 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5021 break;
5022 }
5023 #endif
5024 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5025 }
5026 break;
5027 case 0x26:
5028 #ifdef TARGET_SPARC64
5029 /* V9 stqf, store quad fpreg */
5030 {
5031 TCGv_i32 r_const;
5033 CHECK_FPU_FEATURE(dc, FLOAT128);
5034 gen_op_load_fpr_QT0(QFPREG(rd));
5035 r_const = tcg_const_i32(dc->mem_idx);
5036 gen_address_mask(dc, cpu_addr);
5037 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5038 tcg_temp_free_i32(r_const);
5039 }
5040 break;
5041 #else /* !TARGET_SPARC64 */
5042 /* stdfq, store floating point queue */
5043 #if defined(CONFIG_USER_ONLY)
5044 goto illegal_insn;
5045 #else
5046 if (!supervisor(dc))
5047 goto priv_insn;
5048 if (gen_trap_ifnofpu(dc)) {
5049 goto jmp_insn;
5050 }
5051 goto nfq_insn;
5052 #endif
5053 #endif
5054 case 0x27: /* stdf, store double fpreg */
5055 gen_address_mask(dc, cpu_addr);
5056 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5057 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5058 break;
5059 default:
5060 goto illegal_insn;
5061 }
5062 } else if (xop > 0x33 && xop < 0x3f) {
5063 save_state(dc);
5064 switch (xop) {
5065 #ifdef TARGET_SPARC64
5066 case 0x34: /* V9 stfa */
5067 if (gen_trap_ifnofpu(dc)) {
5068 goto jmp_insn;
5069 }
5070 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5071 break;
5072 case 0x36: /* V9 stqfa */
5073 {
5074 TCGv_i32 r_const;
5076 CHECK_FPU_FEATURE(dc, FLOAT128);
5077 if (gen_trap_ifnofpu(dc)) {
5078 goto jmp_insn;
5079 }
5080 r_const = tcg_const_i32(7);
5081 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5082 tcg_temp_free_i32(r_const);
5083 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5084 }
5085 break;
5086 case 0x37: /* V9 stdfa */
5087 if (gen_trap_ifnofpu(dc)) {
5088 goto jmp_insn;
5089 }
5090 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5091 break;
5092 case 0x3e: /* V9 casxa */
5093 rs2 = GET_FIELD(insn, 27, 31);
5094 cpu_src2 = gen_load_gpr(dc, rs2);
5095 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5096 break;
5097 #else
5098 case 0x34: /* stc */
5099 case 0x35: /* stcsr */
5100 case 0x36: /* stdcq */
5101 case 0x37: /* stdc */
5102 goto ncp_insn;
5103 #endif
5104 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5105 case 0x3c: /* V9 or LEON3 casa */
5106 #ifndef TARGET_SPARC64
5107 CHECK_IU_FEATURE(dc, CASA);
5108 if (IS_IMM) {
5109 goto illegal_insn;
5110 }
5111 /* LEON3 allows CASA from user space with ASI 0xa */
5112 if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
5113 goto priv_insn;
5114 }
5115 #endif
5116 rs2 = GET_FIELD(insn, 27, 31);
5117 cpu_src2 = gen_load_gpr(dc, rs2);
5118 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5119 break;
5120 #endif
5121 default:
5122 goto illegal_insn;
5123 }
5124 } else {
5125 goto illegal_insn;
5126 }
5127 }
5128 break;
5129 }
5130 /* default case for non jump instructions */
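/* SPARC control transfers are delayed, so the default advance is
   pc <- npc, npc <- npc + 4. A JUMP_PC npc means a conditional branch
   whose outcome is only known at run time: gen_branch2 emits both
   candidate targets, selected by cpu_cond. */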
5131 if (dc->npc == DYNAMIC_PC) {
5132 dc->pc = DYNAMIC_PC;
5133 gen_op_next_insn();
5134 } else if (dc->npc == JUMP_PC) {
5135 /* we can do a static jump */
5136 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5137 dc->is_br = 1;
5138 } else {
5139 dc->pc = dc->npc;
5140 dc->npc = dc->npc + 4;
5141 }
5142 jmp_insn:
5143 goto egress;
5144 illegal_insn:
5145 gen_exception(dc, TT_ILL_INSN);
5146 goto egress;
5147 unimp_flush:
5148 gen_exception(dc, TT_UNIMP_FLUSH);
5149 goto egress;
5150 #if !defined(CONFIG_USER_ONLY)
5151 priv_insn:
5152 gen_exception(dc, TT_PRIV_INSN);
5153 goto egress;
5154 #endif
5155 nfpu_insn:
5156 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5157 goto egress;
5158 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5159 nfq_insn:
5160 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5161 goto egress;
5162 #endif
5163 #ifndef TARGET_SPARC64
5164 ncp_insn:
5165 gen_exception(dc, TT_NCP_INSN);
5166 goto egress;
5167 #endif
5168 egress:
5169 if (dc->n_t32 != 0) {
5170 int i;
5171 for (i = dc->n_t32 - 1; i >= 0; --i) {
5172 tcg_temp_free_i32(dc->t32[i]);
5173 }
5174 dc->n_t32 = 0;
5175 }
5176 if (dc->n_ttl != 0) {
5177 int i;
5178 for (i = dc->n_ttl - 1; i >= 0; --i) {
5179 tcg_temp_free(dc->ttl[i]);
5180 }
5181 dc->n_ttl = 0;
5182 }
5183 }
5185 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5186 {
5187 SPARCCPU *cpu = sparc_env_get_cpu(env);
5188 CPUState *cs = CPU(cpu);
5189 target_ulong pc_start, last_pc;
5190 DisasContext dc1, *dc = &dc1;
5191 int num_insns;
5192 int max_insns;
5193 unsigned int insn;
5195 memset(dc, 0, sizeof(DisasContext));
5196 dc->tb = tb;
5197 pc_start = tb->pc;
5198 dc->pc = pc_start;
5199 last_pc = dc->pc;
5200 dc->npc = (target_ulong) tb->cs_base;
5201 dc->cc_op = CC_OP_DYNAMIC;
5202 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5203 dc->def = env->def;
5204 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5205 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5206 dc->singlestep = (cs->singlestep_enabled || singlestep);
5207 #ifdef TARGET_SPARC64
5208 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5209 #endif
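/* Because %asi is stored in the TB flags, dc->asi is a constant for
   the whole translation block: alternate-space instructions can be
   translated against this value rather than reading the ASI register
   at run time. */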
5211 num_insns = 0;
5212 max_insns = tb->cflags & CF_COUNT_MASK;
5213 if (max_insns == 0) {
5214 max_insns = CF_COUNT_MASK;
5215 }
5216 if (max_insns > TCG_MAX_INSNS) {
5217 max_insns = TCG_MAX_INSNS;
5218 }
5220 gen_tb_start(tb);
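/* Each iteration records the (pc, npc) pair via tcg_gen_insn_start so
   restore_state_to_opc can rebuild the CPU state after a fault; for a
   pending conditional branch the npc slot carries jump_pc[0] tagged
   with JUMP_PC in its low bits. */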
5221 do {
5222 if (dc->npc & JUMP_PC) {
5223 assert(dc->jump_pc[1] == dc->pc + 4);
5224 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5225 } else {
5226 tcg_gen_insn_start(dc->pc, dc->npc);
5227 }
5228 num_insns++;
5229 last_pc = dc->pc;
5231 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5232 if (dc->pc != pc_start) {
5233 save_state(dc);
5234 }
5235 gen_helper_debug(cpu_env);
5236 tcg_gen_exit_tb(0);
5237 dc->is_br = 1;
5238 goto exit_gen_loop;
5239 }
5241 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5242 gen_io_start();
5243 }
5245 insn = cpu_ldl_code(env, dc->pc);
5247 disas_sparc_insn(dc, insn);
5249 if (dc->is_br)
5250 break;
5251 /* if the next PC is different, we abort now */
5252 if (dc->pc != (last_pc + 4))
5253 break;
5254 /* if we reach a page boundary, we stop generation so that the
5255 PC of a TT_TFAULT exception is always in the right page */
5256 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5257 break;
5258 /* in single-step mode, we generate only one instruction and
5259 generate an exception */
5260 if (dc->singlestep) {
5261 break;
5262 }
5263 } while (!tcg_op_buf_full() &&
5264 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5265 num_insns < max_insns);
5267 exit_gen_loop:
5268 if (tb->cflags & CF_LAST_IO) {
5269 gen_io_end();
5270 }
5271 if (!dc->is_br) {
5272 if (dc->pc != DYNAMIC_PC &&
5273 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5274 /* static PC and NPC: we can use direct chaining */
5275 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5276 } else {
5277 if (dc->pc != DYNAMIC_PC) {
5278 tcg_gen_movi_tl(cpu_pc, dc->pc);
5279 }
5280 save_npc(dc);
5281 tcg_gen_exit_tb(0);
5282 }
5283 }
5284 gen_tb_end(tb, num_insns);
5286 tb->size = last_pc + 4 - pc_start;
5287 tb->icount = num_insns;
5289 #ifdef DEBUG_DISAS
5290 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5291 && qemu_log_in_addr_range(pc_start)) {
5292 qemu_log("--------------\n");
5293 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5294 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5295 qemu_log("\n");
5296 }
5297 #endif
5298 }
5300 void gen_intermediate_code_init(CPUSPARCState *env)
5301 {
5302 static int inited;
5303 static const char gregnames[32][4] = {
5304 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5305 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5306 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5307 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5309 static const char fregnames[32][4] = {
5310 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5311 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5312 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5313 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5316 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5317 #ifdef TARGET_SPARC64
5318 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5319 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5320 #else
5321 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5322 #endif
5323 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5324 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5325 };
5327 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5328 #ifdef TARGET_SPARC64
5329 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5330 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5331 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5332 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5333 "hstick_cmpr" },
5334 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5335 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5336 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5337 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5338 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5339 #endif
5340 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5341 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5342 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5343 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5344 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5345 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5346 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5347 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5348 #ifndef CONFIG_USER_ONLY
5349 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5350 #endif
5351 };
5353 unsigned int i;
5355 /* init various static tables */
5356 if (inited) {
5357 return;
5358 }
5359 inited = 1;
5361 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5362 tcg_ctx.tcg_env = cpu_env;
5364 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5365 offsetof(CPUSPARCState, regwptr),
5366 "regwptr");
5368 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5369 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5370 }
5372 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5373 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5374 }
5376 TCGV_UNUSED(cpu_regs[0]);
5377 for (i = 1; i < 8; ++i) {
5378 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5379 offsetof(CPUSPARCState, gregs[i]),
5380 gregnames[i]);
5381 }
5383 for (i = 8; i < 32; ++i) {
5384 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5385 (i - 8) * sizeof(target_ulong),
5386 gregnames[i]);
5387 }
5389 for (i = 0; i < TARGET_DPREGS; i++) {
5390 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5391 offsetof(CPUSPARCState, fpr[i]),
5392 fregnames[i]);
5393 }
5394 }
5396 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5397 target_ulong *data)
5398 {
5399 target_ulong pc = data[0];
5400 target_ulong npc = data[1];
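/* data[] holds exactly what tcg_gen_insn_start recorded during
   translation: data[0] is the pc, data[1] either a literal npc,
   DYNAMIC_PC, or a JUMP_PC-tagged branch target resolved below via
   env->cond. */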
5402 env->pc = pc;
5403 if (npc == DYNAMIC_PC) {
5404 /* dynamic NPC: already stored */
5405 } else if (npc & JUMP_PC) {
5406 /* jump PC: use 'cond' and the jump targets of the translation */
5407 if (env->cond) {
5408 env->npc = npc & ~3;
5409 } else {
5410 env->npc = pc + 4;
5411 }
5412 } else {
5413 env->npc = npc;
5414 }
5415 }