target/sparc: Fix boot with SS20 ROM
[qemu/ar7.git] / target / sparc / translate.c
blob a58ad3e1a738f40914b5592b9e5f706abd4e171a
1 /*
2 SPARC translation
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "exec/cpu_ldst.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34 #include "asi.h"
37 #define DEBUG_DISAS
39 #define DYNAMIC_PC 1 /* dynamic pc value */
40 #define JUMP_PC 2 /* dynamic pc value which takes only two values
41 according to jump_pc[T2] */
43 /* global register indexes */
44 static TCGv_env cpu_env;
45 static TCGv_ptr cpu_regwptr;
46 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
47 static TCGv_i32 cpu_cc_op;
48 static TCGv_i32 cpu_psr;
49 static TCGv cpu_fsr, cpu_pc, cpu_npc;
50 static TCGv cpu_regs[32];
51 static TCGv cpu_y;
52 #ifndef CONFIG_USER_ONLY
53 static TCGv cpu_tbr;
54 #endif
55 static TCGv cpu_cond;
56 #ifdef TARGET_SPARC64
57 static TCGv_i32 cpu_xcc, cpu_fprs;
58 static TCGv cpu_gsr;
59 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
60 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
61 #else
62 static TCGv cpu_wim;
63 #endif
64 /* Floating point registers */
65 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67 #include "exec/gen-icount.h"
69 typedef struct DisasContext {
70 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
71 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
72 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
73 int is_br;
74 int mem_idx;
75 bool fpu_enabled;
76 bool address_mask_32bit;
77 bool singlestep;
78 #ifndef CONFIG_USER_ONLY
79 bool supervisor;
80 #ifdef TARGET_SPARC64
81 bool hypervisor;
82 #endif
83 #endif
85 uint32_t cc_op; /* current CC operation */
86 struct TranslationBlock *tb;
87 sparc_def_t *def;
88 TCGv_i32 t32[3];
89 TCGv ttl[5];
90 int n_t32;
91 int n_ttl;
92 #ifdef TARGET_SPARC64
93 int fprs_dirty;
94 int asi;
95 #endif
96 } DisasContext;
98 typedef struct {
99 TCGCond cond;
100 bool is_bool;
101 bool g1, g2;
102 TCGv c1, c2;
103 } DisasCompare;
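/*
 * c1 and c2 are the two comparison operands; g1/g2 flag them as globals
 * (e.g. cpu_cc_dst) that free_compare() must not release.  When is_bool
 * is set, c1 already holds a computed 0/1 value and cond is fixed at
 * TCG_COND_NE against c2 == 0.
 */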
105 // This macro uses non-native bit order
106 #define GET_FIELD(X, FROM, TO) \
107 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
109 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
110 #define GET_FIELD_SP(X, FROM, TO) \
111 GET_FIELD(X, 31 - (TO), 31 - (FROM))
113 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
114 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
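/*
 * A worked example: in the SPARC Bicc format the cond field occupies
 * instruction bits 28..25, i.e. bits 3..6 in the big-endian numbering
 * used by GET_FIELD.  For insn = 0x12800004 ("bne +16"),
 * GET_FIELD(insn, 3, 6) = (insn >> 25) & 0xf = 0x9, the bne condition.
 */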
116 #ifdef TARGET_SPARC64
117 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
118 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
119 #else
120 #define DFPREG(r) (r & 0x1e)
121 #define QFPREG(r) (r & 0x1c)
122 #endif
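/*
 * Example: on sparc64 a double register number carries its bank bit in
 * the encoding's LSB, so %d32 is encoded as rd = 1 and
 * DFPREG(1) = ((1 & 1) << 5) | (1 & 0x1e) = 32.
 */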
124 #define UA2005_HTRAP_MASK 0xff
125 #define V8_TRAP_MASK 0x7f
127 static int sign_extend(int x, int len)
129 len = 32 - len;
130 return (x << len) >> len;
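/* For example, sign_extend(0x1fff, 13) == -1: shifting up by 32 - 13 = 19
   bits and back down arithmetically replicates bit 12, the sign bit of a
   13-bit field, into the upper bits. */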
133 #define IS_IMM (insn & (1<<13))
135 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
137 TCGv_i32 t;
138 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
139 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
140 return t;
143 static inline TCGv get_temp_tl(DisasContext *dc)
145 TCGv t;
146 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
147 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
148 return t;
151 static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
153 #if defined(TARGET_SPARC64)
154 int bit = (rd < 32) ? 1 : 2;
155 /* If we know we've already set this bit within the TB,
156 we can avoid setting it again. */
157 if (!(dc->fprs_dirty & bit)) {
158 dc->fprs_dirty |= bit;
159 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
161 #endif
164 /* floating point registers moves */
165 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
167 #if TCG_TARGET_REG_BITS == 32
168 if (src & 1) {
169 return TCGV_LOW(cpu_fpr[src / 2]);
170 } else {
171 return TCGV_HIGH(cpu_fpr[src / 2]);
173 #else
174 if (src & 1) {
175 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
176 } else {
177 TCGv_i32 ret = get_temp_i32(dc);
178 TCGv_i64 t = tcg_temp_new_i64();
180 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
181 tcg_gen_extrl_i64_i32(ret, t);
182 tcg_temp_free_i64(t);
184 return ret;
186 #endif
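/* Pairs of single-precision registers share one 64-bit cpu_fpr[] element:
   an even-numbered %f register occupies the high half and the following
   odd register the low half, hence the shift by 32 for even sources on
   64-bit TCG hosts above. */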
189 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
191 #if TCG_TARGET_REG_BITS == 32
192 if (dst & 1) {
193 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
194 } else {
195 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
197 #else
198 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
199 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
200 (dst & 1 ? 0 : 32), 32);
201 #endif
202 gen_update_fprs_dirty(dc, dst);
205 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
207 return get_temp_i32(dc);
210 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
212 src = DFPREG(src);
213 return cpu_fpr[src / 2];
216 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
218 dst = DFPREG(dst);
219 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
220 gen_update_fprs_dirty(dc, dst);
223 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
225 return cpu_fpr[DFPREG(dst) / 2];
228 static void gen_op_load_fpr_QT0(unsigned int src)
230 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
231 offsetof(CPU_QuadU, ll.upper));
232 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
233 offsetof(CPU_QuadU, ll.lower));
236 static void gen_op_load_fpr_QT1(unsigned int src)
238 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
239 offsetof(CPU_QuadU, ll.upper));
240 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
241 offsetof(CPU_QuadU, ll.lower));
244 static void gen_op_store_QT0_fpr(unsigned int dst)
246 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
247 offsetof(CPU_QuadU, ll.upper));
248 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
249 offsetof(CPU_QuadU, ll.lower));
252 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
253 TCGv_i64 v1, TCGv_i64 v2)
255 dst = QFPREG(dst);
257 tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
258 tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
259 gen_update_fprs_dirty(dc, dst);
262 #ifdef TARGET_SPARC64
263 static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
265 src = QFPREG(src);
266 return cpu_fpr[src / 2];
269 static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
271 src = QFPREG(src);
272 return cpu_fpr[src / 2 + 1];
275 static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
277 rd = QFPREG(rd);
278 rs = QFPREG(rs);
280 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
281 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
282 gen_update_fprs_dirty(dc, rd);
284 #endif
286 /* moves */
287 #ifdef CONFIG_USER_ONLY
288 #define supervisor(dc) 0
289 #ifdef TARGET_SPARC64
290 #define hypervisor(dc) 0
291 #endif
292 #else
293 #ifdef TARGET_SPARC64
294 #define hypervisor(dc) (dc->hypervisor)
295 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
296 #else
297 #define supervisor(dc) (dc->supervisor)
298 #endif
299 #endif
301 #ifdef TARGET_SPARC64
302 #ifndef TARGET_ABI32
303 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
304 #else
305 #define AM_CHECK(dc) (1)
306 #endif
307 #endif
309 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
311 #ifdef TARGET_SPARC64
312 if (AM_CHECK(dc))
313 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
314 #endif
317 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
319 if (reg > 0) {
320 assert(reg < 32);
321 return cpu_regs[reg];
322 } else {
323 TCGv t = get_temp_tl(dc);
324 tcg_gen_movi_tl(t, 0);
325 return t;
329 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
331 if (reg > 0) {
332 assert(reg < 32);
333 tcg_gen_mov_tl(cpu_regs[reg], v);
337 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
339 if (reg > 0) {
340 assert(reg < 32);
341 return cpu_regs[reg];
342 } else {
343 return get_temp_tl(dc);
347 static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
348 target_ulong npc)
350 if (unlikely(s->singlestep)) {
351 return false;
354 #ifndef CONFIG_USER_ONLY
355 return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
356 (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
357 #else
358 return true;
359 #endif
362 static inline void gen_goto_tb(DisasContext *s, int tb_num,
363 target_ulong pc, target_ulong npc)
365 if (use_goto_tb(s, pc, npc)) {
366 /* jump to same page: we can use a direct jump */
367 tcg_gen_goto_tb(tb_num);
368 tcg_gen_movi_tl(cpu_pc, pc);
369 tcg_gen_movi_tl(cpu_npc, npc);
370 tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
371 } else {
372 /* jump to another page: currently not optimized */
373 tcg_gen_movi_tl(cpu_pc, pc);
374 tcg_gen_movi_tl(cpu_npc, npc);
375 tcg_gen_exit_tb(0);
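/* The value passed to tcg_gen_exit_tb() carries the TB pointer with the
   jump slot number (0 or 1) in its low bits, letting the execution loop
   patch the corresponding goto_tb so the two TBs become chained. */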
379 // XXX suboptimal
380 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
382 tcg_gen_extu_i32_tl(reg, src);
383 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
384 tcg_gen_andi_tl(reg, reg, 0x1);
387 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
389 tcg_gen_extu_i32_tl(reg, src);
390 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
391 tcg_gen_andi_tl(reg, reg, 0x1);
394 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
396 tcg_gen_extu_i32_tl(reg, src);
397 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
398 tcg_gen_andi_tl(reg, reg, 0x1);
401 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
403 tcg_gen_extu_i32_tl(reg, src);
404 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
405 tcg_gen_andi_tl(reg, reg, 0x1);
408 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
410 tcg_gen_mov_tl(cpu_cc_src, src1);
411 tcg_gen_mov_tl(cpu_cc_src2, src2);
412 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
413 tcg_gen_mov_tl(dst, cpu_cc_dst);
416 static TCGv_i32 gen_add32_carry32(void)
418 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
420 /* Carry is computed from a previous add: (dst < src) */
421 #if TARGET_LONG_BITS == 64
422 cc_src1_32 = tcg_temp_new_i32();
423 cc_src2_32 = tcg_temp_new_i32();
424 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
425 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
426 #else
427 cc_src1_32 = cpu_cc_dst;
428 cc_src2_32 = cpu_cc_src;
429 #endif
431 carry_32 = tcg_temp_new_i32();
432 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
434 #if TARGET_LONG_BITS == 64
435 tcg_temp_free_i32(cc_src1_32);
436 tcg_temp_free_i32(cc_src2_32);
437 #endif
439 return carry_32;
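/* The 32-bit add wrapped iff the truncated result is below the first
   input; e.g. 0xffffffff + 1 = 0 < 0xffffffff, so carry_32 becomes 1. */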
442 static TCGv_i32 gen_sub32_carry32(void)
444 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
446 /* Carry is computed from a previous borrow: (src1 < src2) */
447 #if TARGET_LONG_BITS == 64
448 cc_src1_32 = tcg_temp_new_i32();
449 cc_src2_32 = tcg_temp_new_i32();
450 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
451 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
452 #else
453 cc_src1_32 = cpu_cc_src;
454 cc_src2_32 = cpu_cc_src2;
455 #endif
457 carry_32 = tcg_temp_new_i32();
458 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
460 #if TARGET_LONG_BITS == 64
461 tcg_temp_free_i32(cc_src1_32);
462 tcg_temp_free_i32(cc_src2_32);
463 #endif
465 return carry_32;
468 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
469 TCGv src2, int update_cc)
471 TCGv_i32 carry_32;
472 TCGv carry;
474 switch (dc->cc_op) {
475 case CC_OP_DIV:
476 case CC_OP_LOGIC:
477 /* Carry is known to be zero. Fall back to plain ADD. */
478 if (update_cc) {
479 gen_op_add_cc(dst, src1, src2);
480 } else {
481 tcg_gen_add_tl(dst, src1, src2);
483 return;
485 case CC_OP_ADD:
486 case CC_OP_TADD:
487 case CC_OP_TADDTV:
488 if (TARGET_LONG_BITS == 32) {
489 /* We can re-use the host's hardware carry generation by using
490 an ADD2 opcode. We discard the low part of the output.
491 Ideally we'd combine this operation with the add that
492 generated the carry in the first place. */
493 carry = tcg_temp_new();
494 tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
495 tcg_temp_free(carry);
496 goto add_done;
498 carry_32 = gen_add32_carry32();
499 break;
501 case CC_OP_SUB:
502 case CC_OP_TSUB:
503 case CC_OP_TSUBTV:
504 carry_32 = gen_sub32_carry32();
505 break;
507 default:
508 /* We need external help to produce the carry. */
509 carry_32 = tcg_temp_new_i32();
510 gen_helper_compute_C_icc(carry_32, cpu_env);
511 break;
514 #if TARGET_LONG_BITS == 64
515 carry = tcg_temp_new();
516 tcg_gen_extu_i32_i64(carry, carry_32);
517 #else
518 carry = carry_32;
519 #endif
521 tcg_gen_add_tl(dst, src1, src2);
522 tcg_gen_add_tl(dst, dst, carry);
524 tcg_temp_free_i32(carry_32);
525 #if TARGET_LONG_BITS == 64
526 tcg_temp_free(carry);
527 #endif
529 add_done:
530 if (update_cc) {
531 tcg_gen_mov_tl(cpu_cc_src, src1);
532 tcg_gen_mov_tl(cpu_cc_src2, src2);
533 tcg_gen_mov_tl(cpu_cc_dst, dst);
534 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
535 dc->cc_op = CC_OP_ADDX;
539 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
541 tcg_gen_mov_tl(cpu_cc_src, src1);
542 tcg_gen_mov_tl(cpu_cc_src2, src2);
543 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
544 tcg_gen_mov_tl(dst, cpu_cc_dst);
547 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
548 TCGv src2, int update_cc)
550 TCGv_i32 carry_32;
551 TCGv carry;
553 switch (dc->cc_op) {
554 case CC_OP_DIV:
555 case CC_OP_LOGIC:
556 /* Carry is known to be zero. Fall back to plain SUB. */
557 if (update_cc) {
558 gen_op_sub_cc(dst, src1, src2);
559 } else {
560 tcg_gen_sub_tl(dst, src1, src2);
562 return;
564 case CC_OP_ADD:
565 case CC_OP_TADD:
566 case CC_OP_TADDTV:
567 carry_32 = gen_add32_carry32();
568 break;
570 case CC_OP_SUB:
571 case CC_OP_TSUB:
572 case CC_OP_TSUBTV:
573 if (TARGET_LONG_BITS == 32) {
574 /* We can re-use the host's hardware carry generation by using
575 a SUB2 opcode. We discard the low part of the output.
576 Ideally we'd combine this operation with the sub that
577 generated the carry in the first place. */
578 carry = tcg_temp_new();
579 tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
580 tcg_temp_free(carry);
581 goto sub_done;
583 carry_32 = gen_sub32_carry32();
584 break;
586 default:
587 /* We need external help to produce the carry. */
588 carry_32 = tcg_temp_new_i32();
589 gen_helper_compute_C_icc(carry_32, cpu_env);
590 break;
593 #if TARGET_LONG_BITS == 64
594 carry = tcg_temp_new();
595 tcg_gen_extu_i32_i64(carry, carry_32);
596 #else
597 carry = carry_32;
598 #endif
600 tcg_gen_sub_tl(dst, src1, src2);
601 tcg_gen_sub_tl(dst, dst, carry);
603 tcg_temp_free_i32(carry_32);
604 #if TARGET_LONG_BITS == 64
605 tcg_temp_free(carry);
606 #endif
608 sub_done:
609 if (update_cc) {
610 tcg_gen_mov_tl(cpu_cc_src, src1);
611 tcg_gen_mov_tl(cpu_cc_src2, src2);
612 tcg_gen_mov_tl(cpu_cc_dst, dst);
613 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
614 dc->cc_op = CC_OP_SUBX;
618 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
620 TCGv r_temp, zero, t0;
622 r_temp = tcg_temp_new();
623 t0 = tcg_temp_new();
625 /* old op:
626 if (!(env->y & 1))
627 T1 = 0;
629 zero = tcg_const_tl(0);
630 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
631 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
632 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
633 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
634 zero, cpu_cc_src2);
635 tcg_temp_free(zero);
637 // b2 = T0 & 1;
638 // env->y = (b2 << 31) | (env->y >> 1);
639 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
640 tcg_gen_shli_tl(r_temp, r_temp, 31);
641 tcg_gen_shri_tl(t0, cpu_y, 1);
642 tcg_gen_andi_tl(t0, t0, 0x7fffffff);
643 tcg_gen_or_tl(t0, t0, r_temp);
644 tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
646 // b1 = N ^ V;
647 gen_mov_reg_N(t0, cpu_psr);
648 gen_mov_reg_V(r_temp, cpu_psr);
649 tcg_gen_xor_tl(t0, t0, r_temp);
650 tcg_temp_free(r_temp);
652 // T0 = (b1 << 31) | (T0 >> 1);
653 // src1 = T0;
654 tcg_gen_shli_tl(t0, t0, 31);
655 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
656 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
657 tcg_temp_free(t0);
659 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
661 tcg_gen_mov_tl(dst, cpu_cc_dst);
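/* One MULScc executes a single step of a 32x32 multiply: N ^ V is shifted
   into the top of the partial product in cpu_cc_src, rs2 is added in only
   when bit 0 of %y is set, and %y rotates the next multiplier bit into
   place.  32 such steps (plus a final add) yield the full product. */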
664 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
666 #if TARGET_LONG_BITS == 32
667 if (sign_ext) {
668 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
669 } else {
670 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
672 #else
673 TCGv t0 = tcg_temp_new_i64();
674 TCGv t1 = tcg_temp_new_i64();
676 if (sign_ext) {
677 tcg_gen_ext32s_i64(t0, src1);
678 tcg_gen_ext32s_i64(t1, src2);
679 } else {
680 tcg_gen_ext32u_i64(t0, src1);
681 tcg_gen_ext32u_i64(t1, src2);
684 tcg_gen_mul_i64(dst, t0, t1);
685 tcg_temp_free(t0);
686 tcg_temp_free(t1);
688 tcg_gen_shri_i64(cpu_y, dst, 32);
689 #endif
692 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
694 /* zero-extend truncated operands before multiplication */
695 gen_op_multiply(dst, src1, src2, 0);
698 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
700 /* sign-extend truncated operands before multiplication */
701 gen_op_multiply(dst, src1, src2, 1);
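/* Example: umul with both operands 0x80000000 computes the 64-bit product
   0x4000000000000000, and %y receives its high 32 bits, 0x40000000. */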
704 // 1
705 static inline void gen_op_eval_ba(TCGv dst)
707 tcg_gen_movi_tl(dst, 1);
710 // Z
711 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
713 gen_mov_reg_Z(dst, src);
716 // Z | (N ^ V)
717 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
719 TCGv t0 = tcg_temp_new();
720 gen_mov_reg_N(t0, src);
721 gen_mov_reg_V(dst, src);
722 tcg_gen_xor_tl(dst, dst, t0);
723 gen_mov_reg_Z(t0, src);
724 tcg_gen_or_tl(dst, dst, t0);
725 tcg_temp_free(t0);
728 // N ^ V
729 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
731 TCGv t0 = tcg_temp_new();
732 gen_mov_reg_V(t0, src);
733 gen_mov_reg_N(dst, src);
734 tcg_gen_xor_tl(dst, dst, t0);
735 tcg_temp_free(t0);
738 // C | Z
739 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
741 TCGv t0 = tcg_temp_new();
742 gen_mov_reg_Z(t0, src);
743 gen_mov_reg_C(dst, src);
744 tcg_gen_or_tl(dst, dst, t0);
745 tcg_temp_free(t0);
748 // C
749 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
751 gen_mov_reg_C(dst, src);
754 // V
755 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
757 gen_mov_reg_V(dst, src);
760 // 0
761 static inline void gen_op_eval_bn(TCGv dst)
763 tcg_gen_movi_tl(dst, 0);
766 // N
767 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
769 gen_mov_reg_N(dst, src);
772 // !Z
773 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
775 gen_mov_reg_Z(dst, src);
776 tcg_gen_xori_tl(dst, dst, 0x1);
779 // !(Z | (N ^ V))
780 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
782 gen_op_eval_ble(dst, src);
783 tcg_gen_xori_tl(dst, dst, 0x1);
786 // !(N ^ V)
787 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
789 gen_op_eval_bl(dst, src);
790 tcg_gen_xori_tl(dst, dst, 0x1);
793 // !(C | Z)
794 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
796 gen_op_eval_bleu(dst, src);
797 tcg_gen_xori_tl(dst, dst, 0x1);
800 // !C
801 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
803 gen_mov_reg_C(dst, src);
804 tcg_gen_xori_tl(dst, dst, 0x1);
807 // !N
808 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
810 gen_mov_reg_N(dst, src);
811 tcg_gen_xori_tl(dst, dst, 0x1);
814 // !V
815 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
817 gen_mov_reg_V(dst, src);
818 tcg_gen_xori_tl(dst, dst, 0x1);
822    FPSR bit field FCC1 | FCC0:
823     0 =
824     1 <
825     2 >
826     3 unordered
828 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
829 unsigned int fcc_offset)
831 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
832 tcg_gen_andi_tl(reg, reg, 0x1);
835 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
836 unsigned int fcc_offset)
838 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
839 tcg_gen_andi_tl(reg, reg, 0x1);
842 // !0: FCC0 | FCC1
843 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
844 unsigned int fcc_offset)
846 TCGv t0 = tcg_temp_new();
847 gen_mov_reg_FCC0(dst, src, fcc_offset);
848 gen_mov_reg_FCC1(t0, src, fcc_offset);
849 tcg_gen_or_tl(dst, dst, t0);
850 tcg_temp_free(t0);
853 // 1 or 2: FCC0 ^ FCC1
854 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
855 unsigned int fcc_offset)
857 TCGv t0 = tcg_temp_new();
858 gen_mov_reg_FCC0(dst, src, fcc_offset);
859 gen_mov_reg_FCC1(t0, src, fcc_offset);
860 tcg_gen_xor_tl(dst, dst, t0);
861 tcg_temp_free(t0);
864 // 1 or 3: FCC0
865 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
866 unsigned int fcc_offset)
868 gen_mov_reg_FCC0(dst, src, fcc_offset);
871 // 1: FCC0 & !FCC1
872 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
873 unsigned int fcc_offset)
875 TCGv t0 = tcg_temp_new();
876 gen_mov_reg_FCC0(dst, src, fcc_offset);
877 gen_mov_reg_FCC1(t0, src, fcc_offset);
878 tcg_gen_andc_tl(dst, dst, t0);
879 tcg_temp_free(t0);
882 // 2 or 3: FCC1
883 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
884 unsigned int fcc_offset)
886 gen_mov_reg_FCC1(dst, src, fcc_offset);
889 // 2: !FCC0 & FCC1
890 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
891 unsigned int fcc_offset)
893 TCGv t0 = tcg_temp_new();
894 gen_mov_reg_FCC0(dst, src, fcc_offset);
895 gen_mov_reg_FCC1(t0, src, fcc_offset);
896 tcg_gen_andc_tl(dst, t0, dst);
897 tcg_temp_free(t0);
900 // 3: FCC0 & FCC1
901 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
902 unsigned int fcc_offset)
904 TCGv t0 = tcg_temp_new();
905 gen_mov_reg_FCC0(dst, src, fcc_offset);
906 gen_mov_reg_FCC1(t0, src, fcc_offset);
907 tcg_gen_and_tl(dst, dst, t0);
908 tcg_temp_free(t0);
911 // 0: !(FCC0 | FCC1)
912 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
913 unsigned int fcc_offset)
915 TCGv t0 = tcg_temp_new();
916 gen_mov_reg_FCC0(dst, src, fcc_offset);
917 gen_mov_reg_FCC1(t0, src, fcc_offset);
918 tcg_gen_or_tl(dst, dst, t0);
919 tcg_gen_xori_tl(dst, dst, 0x1);
920 tcg_temp_free(t0);
923 // 0 or 3: !(FCC0 ^ FCC1)
924 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
925 unsigned int fcc_offset)
927 TCGv t0 = tcg_temp_new();
928 gen_mov_reg_FCC0(dst, src, fcc_offset);
929 gen_mov_reg_FCC1(t0, src, fcc_offset);
930 tcg_gen_xor_tl(dst, dst, t0);
931 tcg_gen_xori_tl(dst, dst, 0x1);
932 tcg_temp_free(t0);
935 // 0 or 2: !FCC0
936 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
937 unsigned int fcc_offset)
939 gen_mov_reg_FCC0(dst, src, fcc_offset);
940 tcg_gen_xori_tl(dst, dst, 0x1);
943 // !1: !(FCC0 & !FCC1)
944 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
945 unsigned int fcc_offset)
947 TCGv t0 = tcg_temp_new();
948 gen_mov_reg_FCC0(dst, src, fcc_offset);
949 gen_mov_reg_FCC1(t0, src, fcc_offset);
950 tcg_gen_andc_tl(dst, dst, t0);
951 tcg_gen_xori_tl(dst, dst, 0x1);
952 tcg_temp_free(t0);
955 // 0 or 1: !FCC1
956 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
957 unsigned int fcc_offset)
959 gen_mov_reg_FCC1(dst, src, fcc_offset);
960 tcg_gen_xori_tl(dst, dst, 0x1);
963 // !2: !(!FCC0 & FCC1)
964 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
965 unsigned int fcc_offset)
967 TCGv t0 = tcg_temp_new();
968 gen_mov_reg_FCC0(dst, src, fcc_offset);
969 gen_mov_reg_FCC1(t0, src, fcc_offset);
970 tcg_gen_andc_tl(dst, t0, dst);
971 tcg_gen_xori_tl(dst, dst, 0x1);
972 tcg_temp_free(t0);
975 // !3: !(FCC0 & FCC1)
976 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
977 unsigned int fcc_offset)
979 TCGv t0 = tcg_temp_new();
980 gen_mov_reg_FCC0(dst, src, fcc_offset);
981 gen_mov_reg_FCC1(t0, src, fcc_offset);
982 tcg_gen_and_tl(dst, dst, t0);
983 tcg_gen_xori_tl(dst, dst, 0x1);
984 tcg_temp_free(t0);
987 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
988 target_ulong pc2, TCGv r_cond)
990 TCGLabel *l1 = gen_new_label();
992 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
994 gen_goto_tb(dc, 0, pc1, pc1 + 4);
996 gen_set_label(l1);
997 gen_goto_tb(dc, 1, pc2, pc2 + 4);
1000 static void gen_branch_a(DisasContext *dc, target_ulong pc1)
1002 TCGLabel *l1 = gen_new_label();
1003 target_ulong npc = dc->npc;
1005 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
1007 gen_goto_tb(dc, 0, npc, pc1);
1009 gen_set_label(l1);
1010 gen_goto_tb(dc, 1, npc + 4, npc + 8);
1012 dc->is_br = 1;
1015 static void gen_branch_n(DisasContext *dc, target_ulong pc1)
1017 target_ulong npc = dc->npc;
1019 if (likely(npc != DYNAMIC_PC)) {
1020 dc->pc = npc;
1021 dc->jump_pc[0] = pc1;
1022 dc->jump_pc[1] = npc + 4;
1023 dc->npc = JUMP_PC;
1024 } else {
1025 TCGv t, z;
1027 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1029 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1030 t = tcg_const_tl(pc1);
1031 z = tcg_const_tl(0);
1032 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
1033 tcg_temp_free(t);
1034 tcg_temp_free(z);
1036 dc->pc = DYNAMIC_PC;
1040 static inline void gen_generic_branch(DisasContext *dc)
1042 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1043 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1044 TCGv zero = tcg_const_tl(0);
1046 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1048 tcg_temp_free(npc0);
1049 tcg_temp_free(npc1);
1050 tcg_temp_free(zero);
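/* This emits cpu_npc = (cpu_cond != 0 ? jump_pc[0] : jump_pc[1]),
   resolving the pending JUMP_PC state into a concrete next PC without
   generating a branch. */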
1053 /* call this function before using the condition register as it may
1054 have been set for a jump */
1055 static inline void flush_cond(DisasContext *dc)
1057 if (dc->npc == JUMP_PC) {
1058 gen_generic_branch(dc);
1059 dc->npc = DYNAMIC_PC;
1063 static inline void save_npc(DisasContext *dc)
1065 if (dc->npc == JUMP_PC) {
1066 gen_generic_branch(dc);
1067 dc->npc = DYNAMIC_PC;
1068 } else if (dc->npc != DYNAMIC_PC) {
1069 tcg_gen_movi_tl(cpu_npc, dc->npc);
1073 static inline void update_psr(DisasContext *dc)
1075 if (dc->cc_op != CC_OP_FLAGS) {
1076 dc->cc_op = CC_OP_FLAGS;
1077 gen_helper_compute_psr(cpu_env);
1081 static inline void save_state(DisasContext *dc)
1083 tcg_gen_movi_tl(cpu_pc, dc->pc);
1084 save_npc(dc);
1087 static void gen_exception(DisasContext *dc, int which)
1089 TCGv_i32 t;
1091 save_state(dc);
1092 t = tcg_const_i32(which);
1093 gen_helper_raise_exception(cpu_env, t);
1094 tcg_temp_free_i32(t);
1095 dc->is_br = 1;
1098 static void gen_check_align(TCGv addr, int mask)
1100 TCGv_i32 r_mask = tcg_const_i32(mask);
1101 gen_helper_check_align(cpu_env, addr, r_mask);
1102 tcg_temp_free_i32(r_mask);
1105 static inline void gen_mov_pc_npc(DisasContext *dc)
1107 if (dc->npc == JUMP_PC) {
1108 gen_generic_branch(dc);
1109 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1110 dc->pc = DYNAMIC_PC;
1111 } else if (dc->npc == DYNAMIC_PC) {
1112 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1113 dc->pc = DYNAMIC_PC;
1114 } else {
1115 dc->pc = dc->npc;
1119 static inline void gen_op_next_insn(void)
1121 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1122 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1125 static void free_compare(DisasCompare *cmp)
1127 if (!cmp->g1) {
1128 tcg_temp_free(cmp->c1);
1130 if (!cmp->g2) {
1131 tcg_temp_free(cmp->c2);
1135 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1136 DisasContext *dc)
1138 static int subcc_cond[16] = {
1139 TCG_COND_NEVER,
1140 TCG_COND_EQ,
1141 TCG_COND_LE,
1142 TCG_COND_LT,
1143 TCG_COND_LEU,
1144 TCG_COND_LTU,
1145 -1, /* neg */
1146 -1, /* overflow */
1147 TCG_COND_ALWAYS,
1148 TCG_COND_NE,
1149 TCG_COND_GT,
1150 TCG_COND_GE,
1151 TCG_COND_GTU,
1152 TCG_COND_GEU,
1153 -1, /* pos */
1154 -1, /* no overflow */
1157 static int logic_cond[16] = {
1158 TCG_COND_NEVER,
1159 TCG_COND_EQ, /* eq: Z */
1160 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1161 TCG_COND_LT, /* lt: N ^ V -> N */
1162 TCG_COND_EQ, /* leu: C | Z -> Z */
1163 TCG_COND_NEVER, /* ltu: C -> 0 */
1164 TCG_COND_LT, /* neg: N */
1165 TCG_COND_NEVER, /* vs: V -> 0 */
1166 TCG_COND_ALWAYS,
1167 TCG_COND_NE, /* ne: !Z */
1168 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1169 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1170 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1171 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1172 TCG_COND_GE, /* pos: !N */
1173 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1176 TCGv_i32 r_src;
1177 TCGv r_dst;
1179 #ifdef TARGET_SPARC64
1180 if (xcc) {
1181 r_src = cpu_xcc;
1182 } else {
1183 r_src = cpu_psr;
1185 #else
1186 r_src = cpu_psr;
1187 #endif
1189 switch (dc->cc_op) {
1190 case CC_OP_LOGIC:
1191 cmp->cond = logic_cond[cond];
1192 do_compare_dst_0:
1193 cmp->is_bool = false;
1194 cmp->g2 = false;
1195 cmp->c2 = tcg_const_tl(0);
1196 #ifdef TARGET_SPARC64
1197 if (!xcc) {
1198 cmp->g1 = false;
1199 cmp->c1 = tcg_temp_new();
1200 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1201 break;
1203 #endif
1204 cmp->g1 = true;
1205 cmp->c1 = cpu_cc_dst;
1206 break;
1208 case CC_OP_SUB:
1209 switch (cond) {
1210 case 6: /* neg */
1211 case 14: /* pos */
1212 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1213 goto do_compare_dst_0;
1215 case 7: /* overflow */
1216 case 15: /* !overflow */
1217 goto do_dynamic;
1219 default:
1220 cmp->cond = subcc_cond[cond];
1221 cmp->is_bool = false;
1222 #ifdef TARGET_SPARC64
1223 if (!xcc) {
1224 /* Note that sign-extension works for unsigned compares as
1225 long as both operands are sign-extended. */
1226 cmp->g1 = cmp->g2 = false;
1227 cmp->c1 = tcg_temp_new();
1228 cmp->c2 = tcg_temp_new();
1229 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1230 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1231 break;
1233 #endif
1234 cmp->g1 = cmp->g2 = true;
1235 cmp->c1 = cpu_cc_src;
1236 cmp->c2 = cpu_cc_src2;
1237 break;
1239 break;
1241 default:
1242 do_dynamic:
1243 gen_helper_compute_psr(cpu_env);
1244 dc->cc_op = CC_OP_FLAGS;
1245 /* FALLTHRU */
1247 case CC_OP_FLAGS:
1248 /* We're going to generate a boolean result. */
1249 cmp->cond = TCG_COND_NE;
1250 cmp->is_bool = true;
1251 cmp->g1 = cmp->g2 = false;
1252 cmp->c1 = r_dst = tcg_temp_new();
1253 cmp->c2 = tcg_const_tl(0);
1255 switch (cond) {
1256 case 0x0:
1257 gen_op_eval_bn(r_dst);
1258 break;
1259 case 0x1:
1260 gen_op_eval_be(r_dst, r_src);
1261 break;
1262 case 0x2:
1263 gen_op_eval_ble(r_dst, r_src);
1264 break;
1265 case 0x3:
1266 gen_op_eval_bl(r_dst, r_src);
1267 break;
1268 case 0x4:
1269 gen_op_eval_bleu(r_dst, r_src);
1270 break;
1271 case 0x5:
1272 gen_op_eval_bcs(r_dst, r_src);
1273 break;
1274 case 0x6:
1275 gen_op_eval_bneg(r_dst, r_src);
1276 break;
1277 case 0x7:
1278 gen_op_eval_bvs(r_dst, r_src);
1279 break;
1280 case 0x8:
1281 gen_op_eval_ba(r_dst);
1282 break;
1283 case 0x9:
1284 gen_op_eval_bne(r_dst, r_src);
1285 break;
1286 case 0xa:
1287 gen_op_eval_bg(r_dst, r_src);
1288 break;
1289 case 0xb:
1290 gen_op_eval_bge(r_dst, r_src);
1291 break;
1292 case 0xc:
1293 gen_op_eval_bgu(r_dst, r_src);
1294 break;
1295 case 0xd:
1296 gen_op_eval_bcc(r_dst, r_src);
1297 break;
1298 case 0xe:
1299 gen_op_eval_bpos(r_dst, r_src);
1300 break;
1301 case 0xf:
1302 gen_op_eval_bvc(r_dst, r_src);
1303 break;
1305 break;
1309 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1311 unsigned int offset;
1312 TCGv r_dst;
1314 /* For now we still generate a straight boolean result. */
1315 cmp->cond = TCG_COND_NE;
1316 cmp->is_bool = true;
1317 cmp->g1 = cmp->g2 = false;
1318 cmp->c1 = r_dst = tcg_temp_new();
1319 cmp->c2 = tcg_const_tl(0);
1321 switch (cc) {
1322 default:
1323 case 0x0:
1324 offset = 0;
1325 break;
1326 case 0x1:
1327 offset = 32 - 10;
1328 break;
1329 case 0x2:
1330 offset = 34 - 10;
1331 break;
1332 case 0x3:
1333 offset = 36 - 10;
1334 break;
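/* fcc0 sits at FSR bits 11:10 and fcc1..fcc3 at bits 33:32, 35:34 and
   37:36, so each offset above is the distance from FSR_FCC0_SHIFT (10). */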
1337 switch (cond) {
1338 case 0x0:
1339 gen_op_eval_bn(r_dst);
1340 break;
1341 case 0x1:
1342 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1343 break;
1344 case 0x2:
1345 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1346 break;
1347 case 0x3:
1348 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1349 break;
1350 case 0x4:
1351 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1352 break;
1353 case 0x5:
1354 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1355 break;
1356 case 0x6:
1357 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1358 break;
1359 case 0x7:
1360 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1361 break;
1362 case 0x8:
1363 gen_op_eval_ba(r_dst);
1364 break;
1365 case 0x9:
1366 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1367 break;
1368 case 0xa:
1369 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1370 break;
1371 case 0xb:
1372 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1373 break;
1374 case 0xc:
1375 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1376 break;
1377 case 0xd:
1378 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1379 break;
1380 case 0xe:
1381 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1382 break;
1383 case 0xf:
1384 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1385 break;
1389 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1390 DisasContext *dc)
1392 DisasCompare cmp;
1393 gen_compare(&cmp, cc, cond, dc);
1395 /* The interface is to return a boolean in r_dst. */
1396 if (cmp.is_bool) {
1397 tcg_gen_mov_tl(r_dst, cmp.c1);
1398 } else {
1399 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1402 free_compare(&cmp);
1405 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1407 DisasCompare cmp;
1408 gen_fcompare(&cmp, cc, cond);
1410 /* The interface is to return a boolean in r_dst. */
1411 if (cmp.is_bool) {
1412 tcg_gen_mov_tl(r_dst, cmp.c1);
1413 } else {
1414 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1417 free_compare(&cmp);
1420 #ifdef TARGET_SPARC64
1421 // Inverted logic
1422 static const int gen_tcg_cond_reg[8] = {
1423     -1,
1424     TCG_COND_NE,
1425     TCG_COND_GT,
1426     TCG_COND_GE,
1427     -1,
1428     TCG_COND_EQ,
1429     TCG_COND_LE,
1430     TCG_COND_LT,
1433 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1435 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1436 cmp->is_bool = false;
1437 cmp->g1 = true;
1438 cmp->g2 = false;
1439 cmp->c1 = r_src;
1440 cmp->c2 = tcg_const_tl(0);
1443 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1445 DisasCompare cmp;
1446 gen_compare_reg(&cmp, cond, r_src);
1448 /* The interface is to return a boolean in r_dst. */
1449 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1451 free_compare(&cmp);
1453 #endif
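/* Example: for BRZ the cond field is 1, the table supplies TCG_COND_NE,
   and tcg_invert_cond() turns it into TCG_COND_EQ, so the generated
   comparison is r_src == 0.  The two -1 slots are reserved encodings. */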
1455 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1457 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1458 target_ulong target = dc->pc + offset;
1460 #ifdef TARGET_SPARC64
1461 if (unlikely(AM_CHECK(dc))) {
1462 target &= 0xffffffffULL;
1464 #endif
1465 if (cond == 0x0) {
1466 /* unconditional not taken */
1467 if (a) {
1468 dc->pc = dc->npc + 4;
1469 dc->npc = dc->pc + 4;
1470 } else {
1471 dc->pc = dc->npc;
1472 dc->npc = dc->pc + 4;
1474 } else if (cond == 0x8) {
1475 /* unconditional taken */
1476 if (a) {
1477 dc->pc = target;
1478 dc->npc = dc->pc + 4;
1479 } else {
1480 dc->pc = dc->npc;
1481 dc->npc = target;
1482 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1484 } else {
1485 flush_cond(dc);
1486 gen_cond(cpu_cond, cc, cond, dc);
1487 if (a) {
1488 gen_branch_a(dc, target);
1489 } else {
1490 gen_branch_n(dc, target);
1495 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1497 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1498 target_ulong target = dc->pc + offset;
1500 #ifdef TARGET_SPARC64
1501 if (unlikely(AM_CHECK(dc))) {
1502 target &= 0xffffffffULL;
1504 #endif
1505 if (cond == 0x0) {
1506 /* unconditional not taken */
1507 if (a) {
1508 dc->pc = dc->npc + 4;
1509 dc->npc = dc->pc + 4;
1510 } else {
1511 dc->pc = dc->npc;
1512 dc->npc = dc->pc + 4;
1514 } else if (cond == 0x8) {
1515 /* unconditional taken */
1516 if (a) {
1517 dc->pc = target;
1518 dc->npc = dc->pc + 4;
1519 } else {
1520 dc->pc = dc->npc;
1521 dc->npc = target;
1522 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1524 } else {
1525 flush_cond(dc);
1526 gen_fcond(cpu_cond, cc, cond);
1527 if (a) {
1528 gen_branch_a(dc, target);
1529 } else {
1530 gen_branch_n(dc, target);
1535 #ifdef TARGET_SPARC64
1536 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1537 TCGv r_reg)
1539 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1540 target_ulong target = dc->pc + offset;
1542 if (unlikely(AM_CHECK(dc))) {
1543 target &= 0xffffffffULL;
1545 flush_cond(dc);
1546 gen_cond_reg(cpu_cond, cond, r_reg);
1547 if (a) {
1548 gen_branch_a(dc, target);
1549 } else {
1550 gen_branch_n(dc, target);
1554 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1556 switch (fccno) {
1557 case 0:
1558 gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
1559 break;
1560 case 1:
1561 gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1562 break;
1563 case 2:
1564 gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1565 break;
1566 case 3:
1567 gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1568 break;
1572 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1574 switch (fccno) {
1575 case 0:
1576 gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
1577 break;
1578 case 1:
1579 gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1580 break;
1581 case 2:
1582 gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1583 break;
1584 case 3:
1585 gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1586 break;
1590 static inline void gen_op_fcmpq(int fccno)
1592 switch (fccno) {
1593 case 0:
1594 gen_helper_fcmpq(cpu_fsr, cpu_env);
1595 break;
1596 case 1:
1597 gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
1598 break;
1599 case 2:
1600 gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
1601 break;
1602 case 3:
1603 gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
1604 break;
1608 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1610 switch (fccno) {
1611 case 0:
1612 gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
1613 break;
1614 case 1:
1615 gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1616 break;
1617 case 2:
1618 gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1619 break;
1620 case 3:
1621 gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1622 break;
1626 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1628 switch (fccno) {
1629 case 0:
1630 gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
1631 break;
1632 case 1:
1633 gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1634 break;
1635 case 2:
1636 gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1637 break;
1638 case 3:
1639 gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1640 break;
1644 static inline void gen_op_fcmpeq(int fccno)
1646 switch (fccno) {
1647 case 0:
1648 gen_helper_fcmpeq(cpu_fsr, cpu_env);
1649 break;
1650 case 1:
1651 gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
1652 break;
1653 case 2:
1654 gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
1655 break;
1656 case 3:
1657 gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
1658 break;
1662 #else
1664 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1666 gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
1669 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1671 gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
1674 static inline void gen_op_fcmpq(int fccno)
1676 gen_helper_fcmpq(cpu_fsr, cpu_env);
1679 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1681 gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
1684 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1686 gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
1689 static inline void gen_op_fcmpeq(int fccno)
1691 gen_helper_fcmpeq(cpu_fsr, cpu_env);
1693 #endif
1695 static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
1697 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1698 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1699 gen_exception(dc, TT_FP_EXCP);
1702 static int gen_trap_ifnofpu(DisasContext *dc)
1704 #if !defined(CONFIG_USER_ONLY)
1705 if (!dc->fpu_enabled) {
1706 gen_exception(dc, TT_NFPU_INSN);
1707 return 1;
1709 #endif
1710 return 0;
1713 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1715 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1718 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1719 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1721 TCGv_i32 dst, src;
1723 src = gen_load_fpr_F(dc, rs);
1724 dst = gen_dest_fpr_F(dc);
1726 gen(dst, cpu_env, src);
1727 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1729 gen_store_fpr_F(dc, rd, dst);
1732 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1733 void (*gen)(TCGv_i32, TCGv_i32))
1735 TCGv_i32 dst, src;
1737 src = gen_load_fpr_F(dc, rs);
1738 dst = gen_dest_fpr_F(dc);
1740 gen(dst, src);
1742 gen_store_fpr_F(dc, rd, dst);
1745 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1746 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1748 TCGv_i32 dst, src1, src2;
1750 src1 = gen_load_fpr_F(dc, rs1);
1751 src2 = gen_load_fpr_F(dc, rs2);
1752 dst = gen_dest_fpr_F(dc);
1754 gen(dst, cpu_env, src1, src2);
1755 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1757 gen_store_fpr_F(dc, rd, dst);
1760 #ifdef TARGET_SPARC64
1761 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1762 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1764 TCGv_i32 dst, src1, src2;
1766 src1 = gen_load_fpr_F(dc, rs1);
1767 src2 = gen_load_fpr_F(dc, rs2);
1768 dst = gen_dest_fpr_F(dc);
1770 gen(dst, src1, src2);
1772 gen_store_fpr_F(dc, rd, dst);
1774 #endif
1776 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1777 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1779 TCGv_i64 dst, src;
1781 src = gen_load_fpr_D(dc, rs);
1782 dst = gen_dest_fpr_D(dc, rd);
1784 gen(dst, cpu_env, src);
1785 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1787 gen_store_fpr_D(dc, rd, dst);
1790 #ifdef TARGET_SPARC64
1791 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1792 void (*gen)(TCGv_i64, TCGv_i64))
1794 TCGv_i64 dst, src;
1796 src = gen_load_fpr_D(dc, rs);
1797 dst = gen_dest_fpr_D(dc, rd);
1799 gen(dst, src);
1801 gen_store_fpr_D(dc, rd, dst);
1803 #endif
1805 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1806 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1808 TCGv_i64 dst, src1, src2;
1810 src1 = gen_load_fpr_D(dc, rs1);
1811 src2 = gen_load_fpr_D(dc, rs2);
1812 dst = gen_dest_fpr_D(dc, rd);
1814 gen(dst, cpu_env, src1, src2);
1815 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1817 gen_store_fpr_D(dc, rd, dst);
1820 #ifdef TARGET_SPARC64
1821 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1822 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1824 TCGv_i64 dst, src1, src2;
1826 src1 = gen_load_fpr_D(dc, rs1);
1827 src2 = gen_load_fpr_D(dc, rs2);
1828 dst = gen_dest_fpr_D(dc, rd);
1830 gen(dst, src1, src2);
1832 gen_store_fpr_D(dc, rd, dst);
1835 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1836 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1838 TCGv_i64 dst, src1, src2;
1840 src1 = gen_load_fpr_D(dc, rs1);
1841 src2 = gen_load_fpr_D(dc, rs2);
1842 dst = gen_dest_fpr_D(dc, rd);
1844 gen(dst, cpu_gsr, src1, src2);
1846 gen_store_fpr_D(dc, rd, dst);
1849 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1850 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1852 TCGv_i64 dst, src0, src1, src2;
1854 src1 = gen_load_fpr_D(dc, rs1);
1855 src2 = gen_load_fpr_D(dc, rs2);
1856 src0 = gen_load_fpr_D(dc, rd);
1857 dst = gen_dest_fpr_D(dc, rd);
1859 gen(dst, src0, src1, src2);
1861 gen_store_fpr_D(dc, rd, dst);
1863 #endif
1865 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1866 void (*gen)(TCGv_ptr))
1868 gen_op_load_fpr_QT1(QFPREG(rs));
1870 gen(cpu_env);
1871 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1873 gen_op_store_QT0_fpr(QFPREG(rd));
1874 gen_update_fprs_dirty(dc, QFPREG(rd));
1877 #ifdef TARGET_SPARC64
1878 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1879 void (*gen)(TCGv_ptr))
1881 gen_op_load_fpr_QT1(QFPREG(rs));
1883 gen(cpu_env);
1885 gen_op_store_QT0_fpr(QFPREG(rd));
1886 gen_update_fprs_dirty(dc, QFPREG(rd));
1888 #endif
1890 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1891 void (*gen)(TCGv_ptr))
1893 gen_op_load_fpr_QT0(QFPREG(rs1));
1894 gen_op_load_fpr_QT1(QFPREG(rs2));
1896 gen(cpu_env);
1897 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1899 gen_op_store_QT0_fpr(QFPREG(rd));
1900 gen_update_fprs_dirty(dc, QFPREG(rd));
1903 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1904 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1906 TCGv_i64 dst;
1907 TCGv_i32 src1, src2;
1909 src1 = gen_load_fpr_F(dc, rs1);
1910 src2 = gen_load_fpr_F(dc, rs2);
1911 dst = gen_dest_fpr_D(dc, rd);
1913 gen(dst, cpu_env, src1, src2);
1914 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1916 gen_store_fpr_D(dc, rd, dst);
1919 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1920 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1922 TCGv_i64 src1, src2;
1924 src1 = gen_load_fpr_D(dc, rs1);
1925 src2 = gen_load_fpr_D(dc, rs2);
1927 gen(cpu_env, src1, src2);
1928 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1930 gen_op_store_QT0_fpr(QFPREG(rd));
1931 gen_update_fprs_dirty(dc, QFPREG(rd));
1934 #ifdef TARGET_SPARC64
1935 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1936 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1938 TCGv_i64 dst;
1939 TCGv_i32 src;
1941 src = gen_load_fpr_F(dc, rs);
1942 dst = gen_dest_fpr_D(dc, rd);
1944 gen(dst, cpu_env, src);
1945 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1947 gen_store_fpr_D(dc, rd, dst);
1949 #endif
1951 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1952 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1954 TCGv_i64 dst;
1955 TCGv_i32 src;
1957 src = gen_load_fpr_F(dc, rs);
1958 dst = gen_dest_fpr_D(dc, rd);
1960 gen(dst, cpu_env, src);
1962 gen_store_fpr_D(dc, rd, dst);
1965 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1966 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1968 TCGv_i32 dst;
1969 TCGv_i64 src;
1971 src = gen_load_fpr_D(dc, rs);
1972 dst = gen_dest_fpr_F(dc);
1974 gen(dst, cpu_env, src);
1975 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1977 gen_store_fpr_F(dc, rd, dst);
1980 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1981 void (*gen)(TCGv_i32, TCGv_ptr))
1983 TCGv_i32 dst;
1985 gen_op_load_fpr_QT1(QFPREG(rs));
1986 dst = gen_dest_fpr_F(dc);
1988 gen(dst, cpu_env);
1989 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1991 gen_store_fpr_F(dc, rd, dst);
1994 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1995 void (*gen)(TCGv_i64, TCGv_ptr))
1997 TCGv_i64 dst;
1999 gen_op_load_fpr_QT1(QFPREG(rs));
2000 dst = gen_dest_fpr_D(dc, rd);
2002 gen(dst, cpu_env);
2003 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
2005 gen_store_fpr_D(dc, rd, dst);
2008 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
2009 void (*gen)(TCGv_ptr, TCGv_i32))
2011 TCGv_i32 src;
2013 src = gen_load_fpr_F(dc, rs);
2015 gen(cpu_env, src);
2017 gen_op_store_QT0_fpr(QFPREG(rd));
2018 gen_update_fprs_dirty(dc, QFPREG(rd));
2021 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
2022 void (*gen)(TCGv_ptr, TCGv_i64))
2024 TCGv_i64 src;
2026 src = gen_load_fpr_D(dc, rs);
2028 gen(cpu_env, src);
2030 gen_op_store_QT0_fpr(QFPREG(rd));
2031 gen_update_fprs_dirty(dc, QFPREG(rd));
2034 static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
2035 TCGv addr, int mmu_idx, TCGMemOp memop)
2037 gen_address_mask(dc, addr);
2038 tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
2041 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
2043 TCGv m1 = tcg_const_tl(0xff);
2044 gen_address_mask(dc, addr);
2045 tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
2046 tcg_temp_free(m1);
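/* ldstub atomically loads a byte and stores 0xff back to the same
   location; it is SPARC's spinlock primitive, so it maps directly onto
   an atomic exchange with the constant 0xff. */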
2049 /* asi moves */
2050 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2051 typedef enum {
2052 GET_ASI_HELPER,
2053 GET_ASI_EXCP,
2054 GET_ASI_DIRECT,
2055 GET_ASI_DTWINX,
2056 GET_ASI_BLOCK,
2057 GET_ASI_SHORT,
2058 GET_ASI_BCOPY,
2059 GET_ASI_BFILL,
2060 } ASIType;
2062 typedef struct {
2063 ASIType type;
2064 int asi;
2065 int mem_idx;
2066 TCGMemOp memop;
2067 } DisasASI;
2069 static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
2071 int asi = GET_FIELD(insn, 19, 26);
2072 ASIType type = GET_ASI_HELPER;
2073 int mem_idx = dc->mem_idx;
2075 #ifndef TARGET_SPARC64
2076 /* Before v9, all asis are immediate and privileged. */
2077 if (IS_IMM) {
2078 gen_exception(dc, TT_ILL_INSN);
2079 type = GET_ASI_EXCP;
2080 } else if (supervisor(dc)
2081 /* Note that LEON accepts ASI_USERDATA in user mode, for
2082 use with CASA. Also note that previous versions of
2083 QEMU allowed (and old versions of gcc emitted) ASI_P
2084 for LEON, which is incorrect. */
2085 || (asi == ASI_USERDATA
2086 && (dc->def->features & CPU_FEATURE_CASA))) {
2087 switch (asi) {
2088 case ASI_USERDATA: /* User data access */
2089 mem_idx = MMU_USER_IDX;
2090 type = GET_ASI_DIRECT;
2091 break;
2092 case ASI_KERNELDATA: /* Supervisor data access */
2093 mem_idx = MMU_KERNEL_IDX;
2094 type = GET_ASI_DIRECT;
2095 break;
2096 case ASI_M_BYPASS: /* MMU passthrough */
2097 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
2098 mem_idx = MMU_PHYS_IDX;
2099 type = GET_ASI_DIRECT;
2100 break;
2101 case ASI_M_BCOPY: /* Block copy, sta access */
2102 mem_idx = MMU_KERNEL_IDX;
2103 type = GET_ASI_BCOPY;
2104 break;
2105 case ASI_M_BFILL: /* Block fill, stda access */
2106 mem_idx = MMU_KERNEL_IDX;
2107 type = GET_ASI_BFILL;
2108 break;
2110 } else {
2111 gen_exception(dc, TT_PRIV_INSN);
2112 type = GET_ASI_EXCP;
2114 #else
2115 if (IS_IMM) {
2116 asi = dc->asi;
2118 /* With v9, all asis below 0x80 are privileged. */
2119 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
2120 down that bit into DisasContext. For the moment that's ok,
2121 since the direct implementations below don't have any ASIs
2122 in the restricted [0x30, 0x7f] range, and the check will be
2123 done properly in the helper. */
2124 if (!supervisor(dc) && asi < 0x80) {
2125 gen_exception(dc, TT_PRIV_ACT);
2126 type = GET_ASI_EXCP;
2127 } else {
2128 switch (asi) {
2129 case ASI_REAL: /* Bypass */
2130 case ASI_REAL_IO: /* Bypass, non-cacheable */
2131 case ASI_REAL_L: /* Bypass LE */
2132 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
2133 case ASI_TWINX_REAL: /* Real address, twinx */
2134 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
2135 case ASI_QUAD_LDD_PHYS:
2136 case ASI_QUAD_LDD_PHYS_L:
2137 mem_idx = MMU_PHYS_IDX;
2138 break;
2139 case ASI_N: /* Nucleus */
2140 case ASI_NL: /* Nucleus LE */
2141 case ASI_TWINX_N:
2142 case ASI_TWINX_NL:
2143 case ASI_NUCLEUS_QUAD_LDD:
2144 case ASI_NUCLEUS_QUAD_LDD_L:
2145 if (hypervisor(dc)) {
2146 mem_idx = MMU_PHYS_IDX;
2147 } else {
2148 mem_idx = MMU_NUCLEUS_IDX;
2150 break;
2151 case ASI_AIUP: /* As if user primary */
2152 case ASI_AIUPL: /* As if user primary LE */
2153 case ASI_TWINX_AIUP:
2154 case ASI_TWINX_AIUP_L:
2155 case ASI_BLK_AIUP_4V:
2156 case ASI_BLK_AIUP_L_4V:
2157 case ASI_BLK_AIUP:
2158 case ASI_BLK_AIUPL:
2159 mem_idx = MMU_USER_IDX;
2160 break;
2161 case ASI_AIUS: /* As if user secondary */
2162 case ASI_AIUSL: /* As if user secondary LE */
2163 case ASI_TWINX_AIUS:
2164 case ASI_TWINX_AIUS_L:
2165 case ASI_BLK_AIUS_4V:
2166 case ASI_BLK_AIUS_L_4V:
2167 case ASI_BLK_AIUS:
2168 case ASI_BLK_AIUSL:
2169 mem_idx = MMU_USER_SECONDARY_IDX;
2170 break;
2171 case ASI_S: /* Secondary */
2172 case ASI_SL: /* Secondary LE */
2173 case ASI_TWINX_S:
2174 case ASI_TWINX_SL:
2175 case ASI_BLK_COMMIT_S:
2176 case ASI_BLK_S:
2177 case ASI_BLK_SL:
2178 case ASI_FL8_S:
2179 case ASI_FL8_SL:
2180 case ASI_FL16_S:
2181 case ASI_FL16_SL:
2182 if (mem_idx == MMU_USER_IDX) {
2183 mem_idx = MMU_USER_SECONDARY_IDX;
2184 } else if (mem_idx == MMU_KERNEL_IDX) {
2185 mem_idx = MMU_KERNEL_SECONDARY_IDX;
2187 break;
2188 case ASI_P: /* Primary */
2189 case ASI_PL: /* Primary LE */
2190 case ASI_TWINX_P:
2191 case ASI_TWINX_PL:
2192 case ASI_BLK_COMMIT_P:
2193 case ASI_BLK_P:
2194 case ASI_BLK_PL:
2195 case ASI_FL8_P:
2196 case ASI_FL8_PL:
2197 case ASI_FL16_P:
2198 case ASI_FL16_PL:
2199 break;
2201 switch (asi) {
2202 case ASI_REAL:
2203 case ASI_REAL_IO:
2204 case ASI_REAL_L:
2205 case ASI_REAL_IO_L:
2206 case ASI_N:
2207 case ASI_NL:
2208 case ASI_AIUP:
2209 case ASI_AIUPL:
2210 case ASI_AIUS:
2211 case ASI_AIUSL:
2212 case ASI_S:
2213 case ASI_SL:
2214 case ASI_P:
2215 case ASI_PL:
2216 type = GET_ASI_DIRECT;
2217 break;
2218 case ASI_TWINX_REAL:
2219 case ASI_TWINX_REAL_L:
2220 case ASI_TWINX_N:
2221 case ASI_TWINX_NL:
2222 case ASI_TWINX_AIUP:
2223 case ASI_TWINX_AIUP_L:
2224 case ASI_TWINX_AIUS:
2225 case ASI_TWINX_AIUS_L:
2226 case ASI_TWINX_P:
2227 case ASI_TWINX_PL:
2228 case ASI_TWINX_S:
2229 case ASI_TWINX_SL:
2230 case ASI_QUAD_LDD_PHYS:
2231 case ASI_QUAD_LDD_PHYS_L:
2232 case ASI_NUCLEUS_QUAD_LDD:
2233 case ASI_NUCLEUS_QUAD_LDD_L:
2234 type = GET_ASI_DTWINX;
2235 break;
2236 case ASI_BLK_COMMIT_P:
2237 case ASI_BLK_COMMIT_S:
2238 case ASI_BLK_AIUP_4V:
2239 case ASI_BLK_AIUP_L_4V:
2240 case ASI_BLK_AIUP:
2241 case ASI_BLK_AIUPL:
2242 case ASI_BLK_AIUS_4V:
2243 case ASI_BLK_AIUS_L_4V:
2244 case ASI_BLK_AIUS:
2245 case ASI_BLK_AIUSL:
2246 case ASI_BLK_S:
2247 case ASI_BLK_SL:
2248 case ASI_BLK_P:
2249 case ASI_BLK_PL:
2250 type = GET_ASI_BLOCK;
2251 break;
2252 case ASI_FL8_S:
2253 case ASI_FL8_SL:
2254 case ASI_FL8_P:
2255 case ASI_FL8_PL:
2256 memop = MO_UB;
2257 type = GET_ASI_SHORT;
2258 break;
2259 case ASI_FL16_S:
2260 case ASI_FL16_SL:
2261 case ASI_FL16_P:
2262 case ASI_FL16_PL:
2263 memop = MO_TEUW;
2264 type = GET_ASI_SHORT;
2265 break;
2267 /* The little-endian asis all have bit 3 set. */
2268 if (asi & 8) {
2269 memop ^= MO_BSWAP;
2272 #endif
2274 return (DisasASI){ type, asi, mem_idx, memop };
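/* Example: on a pre-v9 CPU the supervisor's "lda [%o0] 0x0a, %o1"
   (ASI_USERDATA) decodes to GET_ASI_DIRECT with mem_idx = MMU_USER_IDX,
   so the access is performed through the user-mode MMU context by an
   ordinary qemu load. */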
2277 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
2278 int insn, TCGMemOp memop)
2280 DisasASI da = get_asi(dc, insn, memop);
2282 switch (da.type) {
2283 case GET_ASI_EXCP:
2284 break;
2285 case GET_ASI_DTWINX: /* Reserved for ldda. */
2286 gen_exception(dc, TT_ILL_INSN);
2287 break;
2288 case GET_ASI_DIRECT:
2289 gen_address_mask(dc, addr);
2290 tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
2291 break;
2292 default:
2294 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2295 TCGv_i32 r_mop = tcg_const_i32(memop);
2297 save_state(dc);
2298 #ifdef TARGET_SPARC64
2299 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
2300 #else
2302 TCGv_i64 t64 = tcg_temp_new_i64();
2303 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2304 tcg_gen_trunc_i64_tl(dst, t64);
2305 tcg_temp_free_i64(t64);
2307 #endif
2308 tcg_temp_free_i32(r_mop);
2309 tcg_temp_free_i32(r_asi);
2311 break;
2315 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2316 int insn, TCGMemOp memop)
2318 DisasASI da = get_asi(dc, insn, memop);
2320 switch (da.type) {
2321 case GET_ASI_EXCP:
2322 break;
2323 case GET_ASI_DTWINX: /* Reserved for stda. */
2324 #ifndef TARGET_SPARC64
2325 gen_exception(dc, TT_ILL_INSN);
2326 break;
2327 #else
2328 if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2329 /* Pre OpenSPARC CPUs don't have these */
2330 gen_exception(dc, TT_ILL_INSN);
2331 return;
2333 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2334 * are ST_BLKINIT_ ASIs */
2335 /* fall through */
2336 #endif
2337 case GET_ASI_DIRECT:
2338 gen_address_mask(dc, addr);
2339 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
2340 break;
2341 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2342 case GET_ASI_BCOPY:
2343 /* Copy 32 bytes from the address in SRC to ADDR. */
2344 /* ??? The original qemu code suggests 4-byte alignment, dropping
2345 the low bits, but the only place I can see this used is in the
2346 Linux kernel with 32 byte alignment, which would make more sense
2347 as a cacheline-style operation. */
2349 TCGv saddr = tcg_temp_new();
2350 TCGv daddr = tcg_temp_new();
2351 TCGv four = tcg_const_tl(4);
2352 TCGv_i32 tmp = tcg_temp_new_i32();
2353 int i;
2355 tcg_gen_andi_tl(saddr, src, -4);
2356 tcg_gen_andi_tl(daddr, addr, -4);
2357 for (i = 0; i < 32; i += 4) {
2358 /* Since the loads and stores are paired, allow the
2359 copy to happen in the host endianness. */
2360 tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
2361 tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
2362 tcg_gen_add_tl(saddr, saddr, four);
2363 tcg_gen_add_tl(daddr, daddr, four);
2366 tcg_temp_free(saddr);
2367 tcg_temp_free(daddr);
2368 tcg_temp_free(four);
2369 tcg_temp_free_i32(tmp);
2371 break;
2372 #endif
2373 default:
2375 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2376 TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
2378 save_state(dc);
2379 #ifdef TARGET_SPARC64
2380 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
2381 #else
2383 TCGv_i64 t64 = tcg_temp_new_i64();
2384 tcg_gen_extu_tl_i64(t64, src);
2385 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2386 tcg_temp_free_i64(t64);
2388 #endif
2389 tcg_temp_free_i32(r_mop);
2390 tcg_temp_free_i32(r_asi);
2392 /* A write to a TLB register may alter page maps. End the TB. */
2393 dc->npc = DYNAMIC_PC;
2395 break;
2399 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2400 TCGv addr, int insn)
2402 DisasASI da = get_asi(dc, insn, MO_TEUL);
2404 switch (da.type) {
2405 case GET_ASI_EXCP:
2406 break;
2407 case GET_ASI_DIRECT:
2408 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2409 break;
2410 default:
2411 /* ??? Should be DAE_invalid_asi. */
2412 gen_exception(dc, TT_DATA_ACCESS);
2413 break;
2417 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2418 int insn, int rd)
2420 DisasASI da = get_asi(dc, insn, MO_TEUL);
2421 TCGv oldv;
2423 switch (da.type) {
2424 case GET_ASI_EXCP:
2425 return;
2426 case GET_ASI_DIRECT:
2427 oldv = tcg_temp_new();
2428 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2429 da.mem_idx, da.memop);
2430 gen_store_gpr(dc, rd, oldv);
2431 tcg_temp_free(oldv);
2432 break;
2433 default:
2434 /* ??? Should be DAE_invalid_asi. */
2435 gen_exception(dc, TT_DATA_ACCESS);
2436 break;
2440 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2442 DisasASI da = get_asi(dc, insn, MO_UB);
2444 switch (da.type) {
2445 case GET_ASI_EXCP:
2446 break;
2447 case GET_ASI_DIRECT:
2448 gen_ldstub(dc, dst, addr, da.mem_idx);
2449 break;
2450 default:
2451 #if 0
2452 /* ??? Should be DAE_invalid_asi. */
2453 gen_exception(dc, TT_DATA_ACCESS);
2454 #else
2455 fprintf(stderr, "%s:%u - %s, ASIType = %d\n",
2456 __FILE__, __LINE__, __func__, da.type);
2459 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2460 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2461 TCGv_i64 s64, t64;
2463 save_state(dc);
2464 t64 = tcg_temp_new_i64();
2465 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2467 s64 = tcg_const_i64(0xff);
2468 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2469 tcg_temp_free_i64(s64);
2470 tcg_temp_free_i32(r_mop);
2471 tcg_temp_free_i32(r_asi);
2473 tcg_gen_trunc_i64_tl(dst, t64);
2474 tcg_temp_free_i64(t64);
2476 #endif
2477 break;
2480 #endif
2482 #ifdef TARGET_SPARC64
2483 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2484 int insn, int size, int rd)
2486 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2487 TCGv_i32 d32;
2488 TCGv_i64 d64;
2490 switch (da.type) {
2491 case GET_ASI_EXCP:
2492 break;
2494 case GET_ASI_DIRECT:
2495 gen_address_mask(dc, addr);
2496 switch (size) {
2497 case 4:
2498 d32 = gen_dest_fpr_F(dc);
2499 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2500 gen_store_fpr_F(dc, rd, d32);
2501 break;
2502 case 8:
2503 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2504 da.memop | MO_ALIGN_4);
2505 break;
2506 case 16:
2507 d64 = tcg_temp_new_i64();
2508 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2509 tcg_gen_addi_tl(addr, addr, 8);
2510 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2511 da.memop | MO_ALIGN_4);
2512 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2513 tcg_temp_free_i64(d64);
2514 break;
2515 default:
2516 g_assert_not_reached();
2518 break;
2520 case GET_ASI_BLOCK:
2521 /* Valid for lddfa on aligned registers only. */
2522 if (size == 8 && (rd & 7) == 0) {
2523 TCGMemOp memop;
2524 TCGv eight;
2525 int i;
2527 gen_address_mask(dc, addr);
2529 /* The first operation checks required alignment. */
2530 memop = da.memop | MO_ALIGN_64;
2531 eight = tcg_const_tl(8);
2532 for (i = 0; ; ++i) {
2533 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2534 da.mem_idx, memop);
2535 if (i == 7) {
2536 break;
2538 tcg_gen_add_tl(addr, addr, eight);
2539 memop = da.memop;
2541 tcg_temp_free(eight);
2542 } else {
2543 gen_exception(dc, TT_ILL_INSN);
2545 break;
2547 case GET_ASI_SHORT:
2548 /* Valid for lddfa only. */
2549 if (size == 8) {
2550 gen_address_mask(dc, addr);
2551 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2552 } else {
2553 gen_exception(dc, TT_ILL_INSN);
2555 break;
2557 default:
2559 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2560 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2562 save_state(dc);
2563 /* According to the table in the UA2011 manual, the only
2564 other asis that are valid for ldfa/lddfa/ldqfa are
2565 the NO_FAULT asis. We still need a helper for these,
2566 but we can just use the integer asi helper for them. */
2567 switch (size) {
2568 case 4:
2569 d64 = tcg_temp_new_i64();
2570 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2571 d32 = gen_dest_fpr_F(dc);
2572 tcg_gen_extrl_i64_i32(d32, d64);
2573 tcg_temp_free_i64(d64);
2574 gen_store_fpr_F(dc, rd, d32);
2575 break;
2576 case 8:
2577 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2578 break;
2579 case 16:
2580 d64 = tcg_temp_new_i64();
2581 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2582 tcg_gen_addi_tl(addr, addr, 8);
2583 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2584 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2585 tcg_temp_free_i64(d64);
2586 break;
2587 default:
2588 g_assert_not_reached();
2590 tcg_temp_free_i32(r_mop);
2591 tcg_temp_free_i32(r_asi);
2593 break;
2597 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2598 int insn, int size, int rd)
2600 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2601 TCGv_i32 d32;
2603 switch (da.type) {
2604 case GET_ASI_EXCP:
2605 break;
2607 case GET_ASI_DIRECT:
2608 gen_address_mask(dc, addr);
2609 switch (size) {
2610 case 4:
2611 d32 = gen_load_fpr_F(dc, rd);
2612 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2613 break;
2614 case 8:
2615 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2616 da.memop | MO_ALIGN_4);
2617 break;
2618 case 16:
2619 /* Only 4-byte alignment required. However, it is legal for the
2620 cpu to signal the alignment fault, and the OS trap handler is
2621 required to fix it up. Requiring 16-byte alignment here avoids
2622 having to probe the second page before performing the first
2623 write. */
2624 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2625 da.memop | MO_ALIGN_16);
2626 tcg_gen_addi_tl(addr, addr, 8);
2627 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2628 break;
2629 default:
2630 g_assert_not_reached();
2632 break;
2634 case GET_ASI_BLOCK:
2635 /* Valid for stdfa on aligned registers only. */
2636 if (size == 8 && (rd & 7) == 0) {
2637 TCGMemOp memop;
2638 TCGv eight;
2639 int i;
2641 gen_address_mask(dc, addr);
2643 /* The first operation checks required alignment. */
2644 memop = da.memop | MO_ALIGN_64;
2645 eight = tcg_const_tl(8);
2646 for (i = 0; ; ++i) {
2647 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2648 da.mem_idx, memop);
2649 if (i == 7) {
2650 break;
2652 tcg_gen_add_tl(addr, addr, eight);
2653 memop = da.memop;
2655 tcg_temp_free(eight);
2656 } else {
2657 gen_exception(dc, TT_ILL_INSN);
2659 break;
2661 case GET_ASI_SHORT:
2662 /* Valid for stdfa only. */
2663 if (size == 8) {
2664 gen_address_mask(dc, addr);
2665 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2666 } else {
2667 gen_exception(dc, TT_ILL_INSN);
2669 break;
2671 default:
2672 /* According to the table in the UA2011 manual, the only
2673 other asis that are valid for stfa/stdfa/stqfa are
2674 the PST* asis, which aren't currently handled. */
2675 gen_exception(dc, TT_ILL_INSN);
2676 break;
2680 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2682 DisasASI da = get_asi(dc, insn, MO_TEQ);
2683 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2684 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2686 switch (da.type) {
2687 case GET_ASI_EXCP:
2688 return;
2690 case GET_ASI_DTWINX:
2691 gen_address_mask(dc, addr);
2692 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2693 tcg_gen_addi_tl(addr, addr, 8);
2694 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2695 break;
2697 case GET_ASI_DIRECT:
2699 TCGv_i64 tmp = tcg_temp_new_i64();
2701 gen_address_mask(dc, addr);
2702 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2704 /* Note that LE ldda acts as if each 32-bit register
2705 result is byte swapped. Having just performed one
2706 64-bit bswap, we need now to swap the writebacks. */
2707 if ((da.memop & MO_BSWAP) == MO_TE) {
2708 tcg_gen_extr32_i64(lo, hi, tmp);
2709 } else {
2710 tcg_gen_extr32_i64(hi, lo, tmp);
2712 tcg_temp_free_i64(tmp);
2714 break;
2716 default:
2717 /* ??? In theory we've handled all of the ASIs that are valid
2718 for ldda, and this should raise DAE_invalid_asi. However,
2719 real hardware allows others. This can be seen with e.g.
2720 FreeBSD 10.3 wrt ASI_IC_TAG. */
2722 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2723 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2724 TCGv_i64 tmp = tcg_temp_new_i64();
2726 save_state(dc);
2727 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2728 tcg_temp_free_i32(r_asi);
2729 tcg_temp_free_i32(r_mop);
2731 /* See above. */
2732 if ((da.memop & MO_BSWAP) == MO_TE) {
2733 tcg_gen_extr32_i64(lo, hi, tmp);
2734 } else {
2735 tcg_gen_extr32_i64(hi, lo, tmp);
2737 tcg_temp_free_i64(tmp);
2739 break;
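/* In every path the register pair is written back only after the
   access has completed, so a faulting ldda leaves rd/rd+1 intact. */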
2742 gen_store_gpr(dc, rd, hi);
2743 gen_store_gpr(dc, rd + 1, lo);
2746 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2747 int insn, int rd)
2749 DisasASI da = get_asi(dc, insn, MO_TEQ);
2750 TCGv lo = gen_load_gpr(dc, rd + 1);
2752 switch (da.type) {
2753 case GET_ASI_EXCP:
2754 break;
2756 case GET_ASI_DTWINX:
2757 gen_address_mask(dc, addr);
2758 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2759 tcg_gen_addi_tl(addr, addr, 8);
2760 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2761 break;
2763 case GET_ASI_DIRECT:
2765 TCGv_i64 t64 = tcg_temp_new_i64();
2767 /* Note that LE stda acts as if each 32-bit register result is
2768 byte swapped. We will perform one 64-bit LE store, so now
2769 we must swap the order of the construction. */
2770 if ((da.memop & MO_BSWAP) == MO_TE) {
2771 tcg_gen_concat32_i64(t64, lo, hi);
2772 } else {
2773 tcg_gen_concat32_i64(t64, hi, lo);
2775 gen_address_mask(dc, addr);
2776 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2777 tcg_temp_free_i64(t64);
2779 break;
2781 default:
2782 /* ??? In theory we've handled all of the ASIs that are valid
2783 for stda, and this should raise DAE_invalid_asi. */
2785 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2786 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2787 TCGv_i64 t64 = tcg_temp_new_i64();
2789 /* See above. */
2790 if ((da.memop & MO_BSWAP) == MO_TE) {
2791 tcg_gen_concat32_i64(t64, lo, hi);
2792 } else {
2793 tcg_gen_concat32_i64(t64, hi, lo);
2796 save_state(dc);
2797 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2798 tcg_temp_free_i32(r_mop);
2799 tcg_temp_free_i32(r_asi);
2800 tcg_temp_free_i64(t64);
2802 break;
2806 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2807 int insn, int rd)
2809 DisasASI da = get_asi(dc, insn, MO_TEQ);
2810 TCGv oldv;
2812 switch (da.type) {
2813 case GET_ASI_EXCP:
2814 return;
2815 case GET_ASI_DIRECT:
2816 oldv = tcg_temp_new();
2817 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2818 da.mem_idx, da.memop);
2819 gen_store_gpr(dc, rd, oldv);
2820 tcg_temp_free(oldv);
2821 break;
2822 default:
2823 /* ??? Should be DAE_invalid_asi. */
2824 gen_exception(dc, TT_DATA_ACCESS);
2825 break;
2829 #elif !defined(CONFIG_USER_ONLY)
2830 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2832 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2833 whereby "rd + 1" elicits "error: array subscript is above array".
2834 Since we have already asserted that rd is even, the semantics
2835 are unchanged. */
2836 TCGv lo = gen_dest_gpr(dc, rd | 1);
2837 TCGv hi = gen_dest_gpr(dc, rd);
2838 TCGv_i64 t64 = tcg_temp_new_i64();
2839 DisasASI da = get_asi(dc, insn, MO_TEQ);
2841 switch (da.type) {
2842 case GET_ASI_EXCP:
2843 tcg_temp_free_i64(t64);
2844 return;
2845 case GET_ASI_DIRECT:
2846 gen_address_mask(dc, addr);
2847 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2848 break;
2849 default:
2851 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2852 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2854 save_state(dc);
2855 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2856 tcg_temp_free_i32(r_mop);
2857 tcg_temp_free_i32(r_asi);
2859 break;
2862 tcg_gen_extr_i64_i32(lo, hi, t64);
2863 tcg_temp_free_i64(t64);
2864 gen_store_gpr(dc, rd | 1, lo);
2865 gen_store_gpr(dc, rd, hi);
2868 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2869 int insn, int rd)
2871 DisasASI da = get_asi(dc, insn, MO_TEQ);
2872 TCGv lo = gen_load_gpr(dc, rd + 1);
2873 TCGv_i64 t64 = tcg_temp_new_i64();
2875 tcg_gen_concat_tl_i64(t64, lo, hi);
2877 switch (da.type) {
2878 case GET_ASI_EXCP:
2879 break;
2880 case GET_ASI_DIRECT:
2881 gen_address_mask(dc, addr);
2882 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2883 break;
2884 case GET_ASI_BFILL:
2885 /* Store 32 bytes of T64 to ADDR. */
2886 /* ??? The original qemu code suggests 8-byte alignment, dropping
2887 the low bits, but the only place I can see this used is in the
2888 Linux kernel with 32 byte alignment, which would make more sense
2889 as a cacheline-style operation. */
2891 TCGv d_addr = tcg_temp_new();
2892 TCGv eight = tcg_const_tl(8);
2893 int i;
2895 tcg_gen_andi_tl(d_addr, addr, -8);
2896 for (i = 0; i < 32; i += 8) {
2897 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2898 tcg_gen_add_tl(d_addr, d_addr, eight);
2901 tcg_temp_free(d_addr);
2902 tcg_temp_free(eight);
2904 break;
2905 default:
2907 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2908 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2910 save_state(dc);
2911 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2912 tcg_temp_free_i32(r_mop);
2913 tcg_temp_free_i32(r_asi);
2915 break;
2918 tcg_temp_free_i64(t64);
2920 #endif
2922 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2924 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2925 return gen_load_gpr(dc, rs1);
2928 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2930 if (IS_IMM) { /* immediate */
2931 target_long simm = GET_FIELDs(insn, 19, 31);
2932 TCGv t = get_temp_tl(dc);
2933 tcg_gen_movi_tl(t, simm);
2934 return t;
2935 } else { /* register */
2936 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2937 return gen_load_gpr(dc, rs2);
2941 #ifdef TARGET_SPARC64
2942 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2944 TCGv_i32 c32, zero, dst, s1, s2;
2946 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2947 or fold the comparison down to 32 bits and use movcond_i32. Choose
2948 the latter. */
2949 c32 = tcg_temp_new_i32();
2950 if (cmp->is_bool) {
2951 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2952 } else {
2953 TCGv_i64 c64 = tcg_temp_new_i64();
2954 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2955 tcg_gen_extrl_i64_i32(c32, c64);
2956 tcg_temp_free_i64(c64);
2959 s1 = gen_load_fpr_F(dc, rs);
2960 s2 = gen_load_fpr_F(dc, rd);
2961 dst = gen_dest_fpr_F(dc);
2962 zero = tcg_const_i32(0);
2964 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2966 tcg_temp_free_i32(c32);
2967 tcg_temp_free_i32(zero);
2968 gen_store_fpr_F(dc, rd, dst);
2971 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2973 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2974 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2975 gen_load_fpr_D(dc, rs),
2976 gen_load_fpr_D(dc, rd));
2977 gen_store_fpr_D(dc, rd, dst);
2980 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2982 int qd = QFPREG(rd);
2983 int qs = QFPREG(rs);
2985 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2986 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2987 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2988 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2990 gen_update_fprs_dirty(dc, qd);
2993 #ifndef CONFIG_USER_ONLY
2994 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2996 TCGv_i32 r_tl = tcg_temp_new_i32();
2998 /* load env->tl into r_tl */
2999 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
3001 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
3002 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
3004 /* calculate offset to current trap state from env->ts, reuse r_tl */
3005 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
3006 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
3008 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
3010 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
3011 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
3012 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
3013 tcg_temp_free_ptr(r_tl_tmp);
3016 tcg_temp_free_i32(r_tl);
3018 #endif
3020 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
3021 int width, bool cc, bool left)
3023 TCGv lo1, lo2, t1, t2;
3024 uint64_t amask, tabl, tabr;
3025 int shift, imask, omask;
3027 if (cc) {
3028 tcg_gen_mov_tl(cpu_cc_src, s1);
3029 tcg_gen_mov_tl(cpu_cc_src2, s2);
3030 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3031 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3032 dc->cc_op = CC_OP_SUB;
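/* i.e. the CC forms of edge also set the integer condition codes,
   exactly as "subcc s1, s2" would. */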
3035 /* Theory of operation: there are two tables, left and right (not to
3036 be confused with the left and right versions of the opcode). These
3037 are indexed by the low 3 bits of the inputs. To make things "easy",
3038 these tables are loaded into two constants, TABL and TABR below.
3039 The operation index = (input & imask) << shift calculates the index
3040 into the constant, while val = (table >> index) & omask calculates
3041 the value we're looking for. */
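/* Worked example (illustrative): edge8 "left" with (s1 & 7) == 5
   yields index = 5 << 3 = 40 and (tabl >> 40) & 0xff == 0xe0,
   i.e. only the bytes at offsets 5..7 of the 8-byte group stay
   selected in the edge mask. */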
3042 switch (width) {
3043 case 8:
3044 imask = 0x7;
3045 shift = 3;
3046 omask = 0xff;
3047 if (left) {
3048 tabl = 0x80c0e0f0f8fcfeffULL;
3049 tabr = 0xff7f3f1f0f070301ULL;
3050 } else {
3051 tabl = 0x0103070f1f3f7fffULL;
3052 tabr = 0xfffefcf8f0e0c080ULL;
3054 break;
3055 case 16:
3056 imask = 0x6;
3057 shift = 1;
3058 omask = 0xf;
3059 if (left) {
3060 tabl = 0x8cef;
3061 tabr = 0xf731;
3062 } else {
3063 tabl = 0x137f;
3064 tabr = 0xfec8;
3066 break;
3067 case 32:
3068 imask = 0x4;
3069 shift = 0;
3070 omask = 0x3;
3071 if (left) {
3072 tabl = (2 << 2) | 3;
3073 tabr = (3 << 2) | 1;
3074 } else {
3075 tabl = (1 << 2) | 3;
3076 tabr = (3 << 2) | 2;
3078 break;
3079 default:
3080 abort();
3083 lo1 = tcg_temp_new();
3084 lo2 = tcg_temp_new();
3085 tcg_gen_andi_tl(lo1, s1, imask);
3086 tcg_gen_andi_tl(lo2, s2, imask);
3087 tcg_gen_shli_tl(lo1, lo1, shift);
3088 tcg_gen_shli_tl(lo2, lo2, shift);
3090 t1 = tcg_const_tl(tabl);
3091 t2 = tcg_const_tl(tabr);
3092 tcg_gen_shr_tl(lo1, t1, lo1);
3093 tcg_gen_shr_tl(lo2, t2, lo2);
3094 tcg_gen_andi_tl(dst, lo1, omask);
3095 tcg_gen_andi_tl(lo2, lo2, omask);
3097 amask = -8;
3098 if (AM_CHECK(dc)) {
3099 amask &= 0xffffffffULL;
3101 tcg_gen_andi_tl(s1, s1, amask);
3102 tcg_gen_andi_tl(s2, s2, amask);
3104 /* We want to compute
3105 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3106 We've already done dst = lo1, so this reduces to
3107 dst &= (s1 == s2 ? -1 : lo2)
3108 Which we perform by
3109 lo2 |= -(s1 == s2)
3110 dst &= lo2
3111 */
3112 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3113 tcg_gen_neg_tl(t1, t1);
3114 tcg_gen_or_tl(lo2, lo2, t1);
3115 tcg_gen_and_tl(dst, dst, lo2);
3117 tcg_temp_free(lo1);
3118 tcg_temp_free(lo2);
3119 tcg_temp_free(t1);
3120 tcg_temp_free(t2);
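/* VIS alignaddr: dst = (s1 + s2) & ~7, with the low three bits of
   the sum deposited into GSR.align for a later faligndata.  E.g.
   s1 + s2 == 0x1003 gives dst == 0x1000 and GSR.align == 3; the
   "left" variant deposits the negated sum instead. */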
3123 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3125 TCGv tmp = tcg_temp_new();
3127 tcg_gen_add_tl(tmp, s1, s2);
3128 tcg_gen_andi_tl(dst, tmp, -8);
3129 if (left) {
3130 tcg_gen_neg_tl(tmp, tmp);
3132 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3134 tcg_temp_free(tmp);
3137 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3139 TCGv t1, t2, shift;
3141 t1 = tcg_temp_new();
3142 t2 = tcg_temp_new();
3143 shift = tcg_temp_new();
3145 tcg_gen_andi_tl(shift, gsr, 7);
3146 tcg_gen_shli_tl(shift, shift, 3);
3147 tcg_gen_shl_tl(t1, s1, shift);
3149 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3150 shift of (up to 63) followed by a constant shift of 1. */
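/* E.g. for shift == 0 this computes (s2 >> 63) >> 1, giving the
   s2 >> 64 == 0 that a single variable shift cannot; note that
   63 ^ shift == 63 - shift for the multiples of 8 produced above. */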
3151 tcg_gen_xori_tl(shift, shift, 63);
3152 tcg_gen_shr_tl(t2, s2, shift);
3153 tcg_gen_shri_tl(t2, t2, 1);
3155 tcg_gen_or_tl(dst, t1, t2);
3157 tcg_temp_free(t1);
3158 tcg_temp_free(t2);
3159 tcg_temp_free(shift);
3161 #endif
3163 #define CHECK_IU_FEATURE(dc, FEATURE) \
3164 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3165 goto illegal_insn;
3166 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3167 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3168 goto nfpu_insn;
3170 /* before an instruction, dc->pc must be static */
3171 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3173 unsigned int opc, rs1, rs2, rd;
3174 TCGv cpu_src1, cpu_src2;
3175 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3176 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3177 target_long simm;
3179 opc = GET_FIELD(insn, 0, 1);
3180 rd = GET_FIELD(insn, 2, 6);
3182 switch (opc) {
3183 case 0: /* branches/sethi */
3185 unsigned int xop = GET_FIELD(insn, 7, 9);
3186 int32_t target;
3187 switch (xop) {
3188 #ifdef TARGET_SPARC64
3189 case 0x1: /* V9 BPcc */
3191 int cc;
3193 target = GET_FIELD_SP(insn, 0, 18);
3194 target = sign_extend(target, 19);
3195 target <<= 2;
3196 cc = GET_FIELD_SP(insn, 20, 21);
3197 if (cc == 0)
3198 do_branch(dc, target, insn, 0);
3199 else if (cc == 2)
3200 do_branch(dc, target, insn, 1);
3201 else
3202 goto illegal_insn;
3203 goto jmp_insn;
3205 case 0x3: /* V9 BPr */
3207 target = GET_FIELD_SP(insn, 0, 13) |
3208 (GET_FIELD_SP(insn, 20, 21) << 14);
3209 target = sign_extend(target, 16);
3210 target <<= 2;
3211 cpu_src1 = get_src1(dc, insn);
3212 do_branch_reg(dc, target, insn, cpu_src1);
3213 goto jmp_insn;
3215 case 0x5: /* V9 FBPcc */
3217 int cc = GET_FIELD_SP(insn, 20, 21);
3218 if (gen_trap_ifnofpu(dc)) {
3219 goto jmp_insn;
3221 target = GET_FIELD_SP(insn, 0, 18);
3222 target = sign_extend(target, 19);
3223 target <<= 2;
3224 do_fbranch(dc, target, insn, cc);
3225 goto jmp_insn;
3227 #else
3228 case 0x7: /* CBN+x */
3230 goto ncp_insn;
3232 #endif
3233 case 0x2: /* BN+x */
3235 target = GET_FIELD(insn, 10, 31);
3236 target = sign_extend(target, 22);
3237 target <<= 2;
3238 do_branch(dc, target, insn, 0);
3239 goto jmp_insn;
3241 case 0x6: /* FBN+x */
3243 if (gen_trap_ifnofpu(dc)) {
3244 goto jmp_insn;
3246 target = GET_FIELD(insn, 10, 31);
3247 target = sign_extend(target, 22);
3248 target <<= 2;
3249 do_fbranch(dc, target, insn, 0);
3250 goto jmp_insn;
3252 case 0x4: /* SETHI */
3253 /* Special-case %g0 because that's the canonical nop. */
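/* imm22 fills bits 31..10 of rd, so e.g. "sethi %hi(0x12345678),
   %o0" writes 0x12345400 (illustrative value). */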
3254 if (rd) {
3255 uint32_t value = GET_FIELD(insn, 10, 31);
3256 TCGv t = gen_dest_gpr(dc, rd);
3257 tcg_gen_movi_tl(t, value << 10);
3258 gen_store_gpr(dc, rd, t);
3260 break;
3261 case 0x0: /* UNIMPL */
3262 default:
3263 goto illegal_insn;
3265 break;
3267 break;
3268 case 1: /*CALL*/
3270 target_long target = GET_FIELDs(insn, 2, 31) << 2;
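/* disp30 is sign-extended and scaled by 4, giving a +/-2GB reach
   relative to the CALL; %o7 below receives the CALL's own address. */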
3271 TCGv o7 = gen_dest_gpr(dc, 15);
3273 tcg_gen_movi_tl(o7, dc->pc);
3274 gen_store_gpr(dc, 15, o7);
3275 target += dc->pc;
3276 gen_mov_pc_npc(dc);
3277 #ifdef TARGET_SPARC64
3278 if (unlikely(AM_CHECK(dc))) {
3279 target &= 0xffffffffULL;
3281 #endif
3282 dc->npc = target;
3284 goto jmp_insn;
3285 case 2: /* FPU & Logical Operations */
3287 unsigned int xop = GET_FIELD(insn, 7, 12);
3288 TCGv cpu_dst = get_temp_tl(dc);
3289 TCGv cpu_tmp0;
3291 if (xop == 0x3a) { /* generate trap */
3292 int cond = GET_FIELD(insn, 3, 6);
3293 TCGv_i32 trap;
3294 TCGLabel *l1 = NULL;
3295 int mask;
3297 if (cond == 0) {
3298 /* Trap never. */
3299 break;
3302 save_state(dc);
3304 if (cond != 8) {
3305 /* Conditional trap. */
3306 DisasCompare cmp;
3307 #ifdef TARGET_SPARC64
3308 /* V9 icc/xcc */
3309 int cc = GET_FIELD_SP(insn, 11, 12);
3310 if (cc == 0) {
3311 gen_compare(&cmp, 0, cond, dc);
3312 } else if (cc == 2) {
3313 gen_compare(&cmp, 1, cond, dc);
3314 } else {
3315 goto illegal_insn;
3317 #else
3318 gen_compare(&cmp, 0, cond, dc);
3319 #endif
3320 l1 = gen_new_label();
3321 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3322 cmp.c1, cmp.c2, l1);
3323 free_compare(&cmp);
3326 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3327 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3329 /* Don't use the normal temporaries, as they may well have
3330 gone out of scope with the branch above. While we're
3331 doing that we might as well pre-truncate to 32-bit. */
3332 trap = tcg_temp_new_i32();
3334 rs1 = GET_FIELD_SP(insn, 14, 18);
3335 if (IS_IMM) {
3336 rs2 = GET_FIELD_SP(insn, 0, 7);
3337 if (rs1 == 0) {
3338 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3339 /* Signal that the trap value is fully constant. */
3340 mask = 0;
3341 } else {
3342 TCGv t1 = gen_load_gpr(dc, rs1);
3343 tcg_gen_trunc_tl_i32(trap, t1);
3344 tcg_gen_addi_i32(trap, trap, rs2);
3346 } else {
3347 TCGv t1, t2;
3348 rs2 = GET_FIELD_SP(insn, 0, 4);
3349 t1 = gen_load_gpr(dc, rs1);
3350 t2 = gen_load_gpr(dc, rs2);
3351 tcg_gen_add_tl(t1, t1, t2);
3352 tcg_gen_trunc_tl_i32(trap, t1);
3354 if (mask != 0) {
3355 tcg_gen_andi_i32(trap, trap, mask);
3356 tcg_gen_addi_i32(trap, trap, TT_TRAP);
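/* So e.g. "ta 5" raises trap type TT_TRAP + 5; V8 masks the
   software trap number to 7 bits, while hypervisor-capable CPUs
   use the wider UA2005 mask. */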
3359 gen_helper_raise_exception(cpu_env, trap);
3360 tcg_temp_free_i32(trap);
3362 if (cond == 8) {
3363 /* An unconditional trap ends the TB. */
3364 dc->is_br = 1;
3365 goto jmp_insn;
3366 } else {
3367 /* A conditional trap falls through to the next insn. */
3368 gen_set_label(l1);
3369 break;
3371 } else if (xop == 0x28) {
3372 rs1 = GET_FIELD(insn, 13, 17);
3373 switch(rs1) {
3374 case 0: /* rdy */
3375 #ifndef TARGET_SPARC64
3376 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3377 manual, rdy on the microSPARC
3378 II */
3379 case 0x0f: /* stbar in the SPARCv8 manual,
3380 rdy on the microSPARC II */
3381 case 0x10 ... 0x1f: /* implementation-dependent in the
3382 SPARCv8 manual, rdy on the
3383 microSPARC II */
3384 /* Read Asr17 */
3385 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3386 TCGv t = gen_dest_gpr(dc, rd);
3387 /* Read Asr17 for a Leon3 monoprocessor */
3388 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3389 gen_store_gpr(dc, rd, t);
3390 break;
3392 #endif
3393 gen_store_gpr(dc, rd, cpu_y);
3394 break;
3395 #ifdef TARGET_SPARC64
3396 case 0x2: /* V9 rdccr */
3397 update_psr(dc);
3398 gen_helper_rdccr(cpu_dst, cpu_env);
3399 gen_store_gpr(dc, rd, cpu_dst);
3400 break;
3401 case 0x3: /* V9 rdasi */
3402 tcg_gen_movi_tl(cpu_dst, dc->asi);
3403 gen_store_gpr(dc, rd, cpu_dst);
3404 break;
3405 case 0x4: /* V9 rdtick */
3407 TCGv_ptr r_tickptr;
3408 TCGv_i32 r_const;
3410 r_tickptr = tcg_temp_new_ptr();
3411 r_const = tcg_const_i32(dc->mem_idx);
3412 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3413 offsetof(CPUSPARCState, tick));
3414 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3415 r_const);
3416 tcg_temp_free_ptr(r_tickptr);
3417 tcg_temp_free_i32(r_const);
3418 gen_store_gpr(dc, rd, cpu_dst);
3420 break;
3421 case 0x5: /* V9 rdpc */
3423 TCGv t = gen_dest_gpr(dc, rd);
3424 if (unlikely(AM_CHECK(dc))) {
3425 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3426 } else {
3427 tcg_gen_movi_tl(t, dc->pc);
3429 gen_store_gpr(dc, rd, t);
3431 break;
3432 case 0x6: /* V9 rdfprs */
3433 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3434 gen_store_gpr(dc, rd, cpu_dst);
3435 break;
3436 case 0xf: /* V9 membar */
3437 break; /* no effect */
3438 case 0x13: /* Graphics Status */
3439 if (gen_trap_ifnofpu(dc)) {
3440 goto jmp_insn;
3442 gen_store_gpr(dc, rd, cpu_gsr);
3443 break;
3444 case 0x16: /* Softint */
3445 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3446 offsetof(CPUSPARCState, softint));
3447 gen_store_gpr(dc, rd, cpu_dst);
3448 break;
3449 case 0x17: /* Tick compare */
3450 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3451 break;
3452 case 0x18: /* System tick */
3454 TCGv_ptr r_tickptr;
3455 TCGv_i32 r_const;
3457 r_tickptr = tcg_temp_new_ptr();
3458 r_const = tcg_const_i32(dc->mem_idx);
3459 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3460 offsetof(CPUSPARCState, stick));
3461 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3462 r_const);
3463 tcg_temp_free_ptr(r_tickptr);
3464 tcg_temp_free_i32(r_const);
3465 gen_store_gpr(dc, rd, cpu_dst);
3467 break;
3468 case 0x19: /* System tick compare */
3469 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3470 break;
3471 case 0x1a: /* UltraSPARC-T1 Strand status */
3472 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3473 * this ASR as impl. dep
3474 */
3475 CHECK_IU_FEATURE(dc, HYPV);
3477 TCGv t = gen_dest_gpr(dc, rd);
3478 tcg_gen_movi_tl(t, 1UL);
3479 gen_store_gpr(dc, rd, t);
3481 break;
3482 case 0x10: /* Performance Control */
3483 case 0x11: /* Performance Instrumentation Counter */
3484 case 0x12: /* Dispatch Control */
3485 case 0x14: /* Softint set, WO */
3486 case 0x15: /* Softint clear, WO */
3487 #endif
3488 default:
3489 goto illegal_insn;
3491 #if !defined(CONFIG_USER_ONLY)
3492 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3493 #ifndef TARGET_SPARC64
3494 if (!supervisor(dc)) {
3495 goto priv_insn;
3497 update_psr(dc);
3498 gen_helper_rdpsr(cpu_dst, cpu_env);
3499 #else
3500 CHECK_IU_FEATURE(dc, HYPV);
3501 if (!hypervisor(dc))
3502 goto priv_insn;
3503 rs1 = GET_FIELD(insn, 13, 17);
3504 switch (rs1) {
3505 case 0: // hpstate
3506 tcg_gen_ld_i64(cpu_dst, cpu_env,
3507 offsetof(CPUSPARCState, hpstate));
3508 break;
3509 case 1: // htstate
3510 // gen_op_rdhtstate();
3511 break;
3512 case 3: // hintp
3513 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3514 break;
3515 case 5: // htba
3516 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3517 break;
3518 case 6: // hver
3519 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3520 break;
3521 case 31: // hstick_cmpr
3522 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3523 break;
3524 default:
3525 goto illegal_insn;
3527 #endif
3528 gen_store_gpr(dc, rd, cpu_dst);
3529 break;
3530 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3531 if (!supervisor(dc)) {
3532 goto priv_insn;
3534 cpu_tmp0 = get_temp_tl(dc);
3535 #ifdef TARGET_SPARC64
3536 rs1 = GET_FIELD(insn, 13, 17);
3537 switch (rs1) {
3538 case 0: // tpc
3540 TCGv_ptr r_tsptr;
3542 r_tsptr = tcg_temp_new_ptr();
3543 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3544 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3545 offsetof(trap_state, tpc));
3546 tcg_temp_free_ptr(r_tsptr);
3548 break;
3549 case 1: // tnpc
3551 TCGv_ptr r_tsptr;
3553 r_tsptr = tcg_temp_new_ptr();
3554 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3555 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3556 offsetof(trap_state, tnpc));
3557 tcg_temp_free_ptr(r_tsptr);
3559 break;
3560 case 2: // tstate
3562 TCGv_ptr r_tsptr;
3564 r_tsptr = tcg_temp_new_ptr();
3565 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3566 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3567 offsetof(trap_state, tstate));
3568 tcg_temp_free_ptr(r_tsptr);
3570 break;
3571 case 3: // tt
3573 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3575 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3576 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3577 offsetof(trap_state, tt));
3578 tcg_temp_free_ptr(r_tsptr);
3580 break;
3581 case 4: // tick
3583 TCGv_ptr r_tickptr;
3584 TCGv_i32 r_const;
3586 r_tickptr = tcg_temp_new_ptr();
3587 r_const = tcg_const_i32(dc->mem_idx);
3588 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3589 offsetof(CPUSPARCState, tick));
3590 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3591 r_tickptr, r_const);
3592 tcg_temp_free_ptr(r_tickptr);
3593 tcg_temp_free_i32(r_const);
3595 break;
3596 case 5: // tba
3597 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3598 break;
3599 case 6: // pstate
3600 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3601 offsetof(CPUSPARCState, pstate));
3602 break;
3603 case 7: // tl
3604 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3605 offsetof(CPUSPARCState, tl));
3606 break;
3607 case 8: // pil
3608 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3609 offsetof(CPUSPARCState, psrpil));
3610 break;
3611 case 9: // cwp
3612 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3613 break;
3614 case 10: // cansave
3615 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3616 offsetof(CPUSPARCState, cansave));
3617 break;
3618 case 11: // canrestore
3619 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3620 offsetof(CPUSPARCState, canrestore));
3621 break;
3622 case 12: // cleanwin
3623 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3624 offsetof(CPUSPARCState, cleanwin));
3625 break;
3626 case 13: // otherwin
3627 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3628 offsetof(CPUSPARCState, otherwin));
3629 break;
3630 case 14: // wstate
3631 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3632 offsetof(CPUSPARCState, wstate));
3633 break;
3634 case 16: // UA2005 gl
3635 CHECK_IU_FEATURE(dc, GL);
3636 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3637 offsetof(CPUSPARCState, gl));
3638 break;
3639 case 26: // UA2005 strand status
3640 CHECK_IU_FEATURE(dc, HYPV);
3641 if (!hypervisor(dc))
3642 goto priv_insn;
3643 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3644 break;
3645 case 31: // ver
3646 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3647 break;
3648 case 15: // fq
3649 default:
3650 goto illegal_insn;
3652 #else
3653 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3654 #endif
3655 gen_store_gpr(dc, rd, cpu_tmp0);
3656 break;
3657 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3658 #ifdef TARGET_SPARC64
3659 gen_helper_flushw(cpu_env);
3660 #else
3661 if (!supervisor(dc))
3662 goto priv_insn;
3663 gen_store_gpr(dc, rd, cpu_tbr);
3664 #endif
3665 break;
3666 #endif
3667 } else if (xop == 0x34) { /* FPU Operations */
3668 if (gen_trap_ifnofpu(dc)) {
3669 goto jmp_insn;
3671 gen_op_clear_ieee_excp_and_FTT();
3672 rs1 = GET_FIELD(insn, 13, 17);
3673 rs2 = GET_FIELD(insn, 27, 31);
3674 xop = GET_FIELD(insn, 18, 26);
3676 switch (xop) {
3677 case 0x1: /* fmovs */
3678 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3679 gen_store_fpr_F(dc, rd, cpu_src1_32);
3680 break;
3681 case 0x5: /* fnegs */
3682 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3683 break;
3684 case 0x9: /* fabss */
3685 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3686 break;
3687 case 0x29: /* fsqrts */
3688 CHECK_FPU_FEATURE(dc, FSQRT);
3689 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3690 break;
3691 case 0x2a: /* fsqrtd */
3692 CHECK_FPU_FEATURE(dc, FSQRT);
3693 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3694 break;
3695 case 0x2b: /* fsqrtq */
3696 CHECK_FPU_FEATURE(dc, FLOAT128);
3697 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3698 break;
3699 case 0x41: /* fadds */
3700 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3701 break;
3702 case 0x42: /* faddd */
3703 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3704 break;
3705 case 0x43: /* faddq */
3706 CHECK_FPU_FEATURE(dc, FLOAT128);
3707 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3708 break;
3709 case 0x45: /* fsubs */
3710 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3711 break;
3712 case 0x46: /* fsubd */
3713 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3714 break;
3715 case 0x47: /* fsubq */
3716 CHECK_FPU_FEATURE(dc, FLOAT128);
3717 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3718 break;
3719 case 0x49: /* fmuls */
3720 CHECK_FPU_FEATURE(dc, FMUL);
3721 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3722 break;
3723 case 0x4a: /* fmuld */
3724 CHECK_FPU_FEATURE(dc, FMUL);
3725 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3726 break;
3727 case 0x4b: /* fmulq */
3728 CHECK_FPU_FEATURE(dc, FLOAT128);
3729 CHECK_FPU_FEATURE(dc, FMUL);
3730 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3731 break;
3732 case 0x4d: /* fdivs */
3733 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3734 break;
3735 case 0x4e: /* fdivd */
3736 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3737 break;
3738 case 0x4f: /* fdivq */
3739 CHECK_FPU_FEATURE(dc, FLOAT128);
3740 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3741 break;
3742 case 0x69: /* fsmuld */
3743 CHECK_FPU_FEATURE(dc, FSMULD);
3744 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3745 break;
3746 case 0x6e: /* fdmulq */
3747 CHECK_FPU_FEATURE(dc, FLOAT128);
3748 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3749 break;
3750 case 0xc4: /* fitos */
3751 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3752 break;
3753 case 0xc6: /* fdtos */
3754 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3755 break;
3756 case 0xc7: /* fqtos */
3757 CHECK_FPU_FEATURE(dc, FLOAT128);
3758 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3759 break;
3760 case 0xc8: /* fitod */
3761 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3762 break;
3763 case 0xc9: /* fstod */
3764 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3765 break;
3766 case 0xcb: /* fqtod */
3767 CHECK_FPU_FEATURE(dc, FLOAT128);
3768 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3769 break;
3770 case 0xcc: /* fitoq */
3771 CHECK_FPU_FEATURE(dc, FLOAT128);
3772 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3773 break;
3774 case 0xcd: /* fstoq */
3775 CHECK_FPU_FEATURE(dc, FLOAT128);
3776 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3777 break;
3778 case 0xce: /* fdtoq */
3779 CHECK_FPU_FEATURE(dc, FLOAT128);
3780 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3781 break;
3782 case 0xd1: /* fstoi */
3783 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3784 break;
3785 case 0xd2: /* fdtoi */
3786 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3787 break;
3788 case 0xd3: /* fqtoi */
3789 CHECK_FPU_FEATURE(dc, FLOAT128);
3790 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3791 break;
3792 #ifdef TARGET_SPARC64
3793 case 0x2: /* V9 fmovd */
3794 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3795 gen_store_fpr_D(dc, rd, cpu_src1_64);
3796 break;
3797 case 0x3: /* V9 fmovq */
3798 CHECK_FPU_FEATURE(dc, FLOAT128);
3799 gen_move_Q(dc, rd, rs2);
3800 break;
3801 case 0x6: /* V9 fnegd */
3802 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3803 break;
3804 case 0x7: /* V9 fnegq */
3805 CHECK_FPU_FEATURE(dc, FLOAT128);
3806 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3807 break;
3808 case 0xa: /* V9 fabsd */
3809 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3810 break;
3811 case 0xb: /* V9 fabsq */
3812 CHECK_FPU_FEATURE(dc, FLOAT128);
3813 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3814 break;
3815 case 0x81: /* V9 fstox */
3816 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3817 break;
3818 case 0x82: /* V9 fdtox */
3819 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3820 break;
3821 case 0x83: /* V9 fqtox */
3822 CHECK_FPU_FEATURE(dc, FLOAT128);
3823 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3824 break;
3825 case 0x84: /* V9 fxtos */
3826 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3827 break;
3828 case 0x88: /* V9 fxtod */
3829 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3830 break;
3831 case 0x8c: /* V9 fxtoq */
3832 CHECK_FPU_FEATURE(dc, FLOAT128);
3833 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3834 break;
3835 #endif
3836 default:
3837 goto illegal_insn;
3839 } else if (xop == 0x35) { /* FPU Operations */
3840 #ifdef TARGET_SPARC64
3841 int cond;
3842 #endif
3843 if (gen_trap_ifnofpu(dc)) {
3844 goto jmp_insn;
3846 gen_op_clear_ieee_excp_and_FTT();
3847 rs1 = GET_FIELD(insn, 13, 17);
3848 rs2 = GET_FIELD(insn, 27, 31);
3849 xop = GET_FIELD(insn, 18, 26);
3851 #ifdef TARGET_SPARC64
3852 #define FMOVR(sz) \
3853 do { \
3854 DisasCompare cmp; \
3855 cond = GET_FIELD_SP(insn, 10, 12); \
3856 cpu_src1 = get_src1(dc, insn); \
3857 gen_compare_reg(&cmp, cond, cpu_src1); \
3858 gen_fmov##sz(dc, &cmp, rd, rs2); \
3859 free_compare(&cmp); \
3860 } while (0)
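/* FMOVR(d), for example, decodes rcond from bits 10..12, compares
   the rs1 register against zero via gen_compare_reg, and does the
   conditional double-precision move for fmovrd. */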
3862 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3863 FMOVR(s);
3864 break;
3865 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3866 FMOVR(d);
3867 break;
3868 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3869 CHECK_FPU_FEATURE(dc, FLOAT128);
3870 FMOVR(q);
3871 break;
3873 #undef FMOVR
3874 #endif
3875 switch (xop) {
3876 #ifdef TARGET_SPARC64
3877 #define FMOVCC(fcc, sz) \
3878 do { \
3879 DisasCompare cmp; \
3880 cond = GET_FIELD_SP(insn, 14, 17); \
3881 gen_fcompare(&cmp, fcc, cond); \
3882 gen_fmov##sz(dc, &cmp, rd, rs2); \
3883 free_compare(&cmp); \
3884 } while (0)
3886 case 0x001: /* V9 fmovscc %fcc0 */
3887 FMOVCC(0, s);
3888 break;
3889 case 0x002: /* V9 fmovdcc %fcc0 */
3890 FMOVCC(0, d);
3891 break;
3892 case 0x003: /* V9 fmovqcc %fcc0 */
3893 CHECK_FPU_FEATURE(dc, FLOAT128);
3894 FMOVCC(0, q);
3895 break;
3896 case 0x041: /* V9 fmovscc %fcc1 */
3897 FMOVCC(1, s);
3898 break;
3899 case 0x042: /* V9 fmovdcc %fcc1 */
3900 FMOVCC(1, d);
3901 break;
3902 case 0x043: /* V9 fmovqcc %fcc1 */
3903 CHECK_FPU_FEATURE(dc, FLOAT128);
3904 FMOVCC(1, q);
3905 break;
3906 case 0x081: /* V9 fmovscc %fcc2 */
3907 FMOVCC(2, s);
3908 break;
3909 case 0x082: /* V9 fmovdcc %fcc2 */
3910 FMOVCC(2, d);
3911 break;
3912 case 0x083: /* V9 fmovqcc %fcc2 */
3913 CHECK_FPU_FEATURE(dc, FLOAT128);
3914 FMOVCC(2, q);
3915 break;
3916 case 0x0c1: /* V9 fmovscc %fcc3 */
3917 FMOVCC(3, s);
3918 break;
3919 case 0x0c2: /* V9 fmovdcc %fcc3 */
3920 FMOVCC(3, d);
3921 break;
3922 case 0x0c3: /* V9 fmovqcc %fcc3 */
3923 CHECK_FPU_FEATURE(dc, FLOAT128);
3924 FMOVCC(3, q);
3925 break;
3926 #undef FMOVCC
3927 #define FMOVCC(xcc, sz) \
3928 do { \
3929 DisasCompare cmp; \
3930 cond = GET_FIELD_SP(insn, 14, 17); \
3931 gen_compare(&cmp, xcc, cond, dc); \
3932 gen_fmov##sz(dc, &cmp, rd, rs2); \
3933 free_compare(&cmp); \
3934 } while (0)
3936 case 0x101: /* V9 fmovscc %icc */
3937 FMOVCC(0, s);
3938 break;
3939 case 0x102: /* V9 fmovdcc %icc */
3940 FMOVCC(0, d);
3941 break;
3942 case 0x103: /* V9 fmovqcc %icc */
3943 CHECK_FPU_FEATURE(dc, FLOAT128);
3944 FMOVCC(0, q);
3945 break;
3946 case 0x181: /* V9 fmovscc %xcc */
3947 FMOVCC(1, s);
3948 break;
3949 case 0x182: /* V9 fmovdcc %xcc */
3950 FMOVCC(1, d);
3951 break;
3952 case 0x183: /* V9 fmovqcc %xcc */
3953 CHECK_FPU_FEATURE(dc, FLOAT128);
3954 FMOVCC(1, q);
3955 break;
3956 #undef FMOVCC
3957 #endif
3958 case 0x51: /* fcmps, V9 %fcc */
3959 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3960 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3961 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3962 break;
3963 case 0x52: /* fcmpd, V9 %fcc */
3964 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3965 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3966 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3967 break;
3968 case 0x53: /* fcmpq, V9 %fcc */
3969 CHECK_FPU_FEATURE(dc, FLOAT128);
3970 gen_op_load_fpr_QT0(QFPREG(rs1));
3971 gen_op_load_fpr_QT1(QFPREG(rs2));
3972 gen_op_fcmpq(rd & 3);
3973 break;
3974 case 0x55: /* fcmpes, V9 %fcc */
3975 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3976 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3977 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3978 break;
3979 case 0x56: /* fcmped, V9 %fcc */
3980 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3981 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3982 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3983 break;
3984 case 0x57: /* fcmpeq, V9 %fcc */
3985 CHECK_FPU_FEATURE(dc, FLOAT128);
3986 gen_op_load_fpr_QT0(QFPREG(rs1));
3987 gen_op_load_fpr_QT1(QFPREG(rs2));
3988 gen_op_fcmpeq(rd & 3);
3989 break;
3990 default:
3991 goto illegal_insn;
3993 } else if (xop == 0x2) {
3994 TCGv dst = gen_dest_gpr(dc, rd);
3995 rs1 = GET_FIELD(insn, 13, 17);
3996 if (rs1 == 0) {
3997 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3998 if (IS_IMM) { /* immediate */
3999 simm = GET_FIELDs(insn, 19, 31);
4000 tcg_gen_movi_tl(dst, simm);
4001 gen_store_gpr(dc, rd, dst);
4002 } else { /* register */
4003 rs2 = GET_FIELD(insn, 27, 31);
4004 if (rs2 == 0) {
4005 tcg_gen_movi_tl(dst, 0);
4006 gen_store_gpr(dc, rd, dst);
4007 } else {
4008 cpu_src2 = gen_load_gpr(dc, rs2);
4009 gen_store_gpr(dc, rd, cpu_src2);
4012 } else {
4013 cpu_src1 = get_src1(dc, insn);
4014 if (IS_IMM) { /* immediate */
4015 simm = GET_FIELDs(insn, 19, 31);
4016 tcg_gen_ori_tl(dst, cpu_src1, simm);
4017 gen_store_gpr(dc, rd, dst);
4018 } else { /* register */
4019 rs2 = GET_FIELD(insn, 27, 31);
4020 if (rs2 == 0) {
4021 /* mov shortcut: or x, %g0, y -> mov x, y */
4022 gen_store_gpr(dc, rd, cpu_src1);
4023 } else {
4024 cpu_src2 = gen_load_gpr(dc, rs2);
4025 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4026 gen_store_gpr(dc, rd, dst);
4030 #ifdef TARGET_SPARC64
4031 } else if (xop == 0x25) { /* sll, V9 sllx */
4032 cpu_src1 = get_src1(dc, insn);
4033 if (IS_IMM) { /* immediate */
4034 simm = GET_FIELDs(insn, 20, 31);
4035 if (insn & (1 << 12)) {
4036 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4037 } else {
4038 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4040 } else { /* register */
4041 rs2 = GET_FIELD(insn, 27, 31);
4042 cpu_src2 = gen_load_gpr(dc, rs2);
4043 cpu_tmp0 = get_temp_tl(dc);
4044 if (insn & (1 << 12)) {
4045 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4046 } else {
4047 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4049 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4051 gen_store_gpr(dc, rd, cpu_dst);
4052 } else if (xop == 0x26) { /* srl, V9 srlx */
4053 cpu_src1 = get_src1(dc, insn);
4054 if (IS_IMM) { /* immediate */
4055 simm = GET_FIELDs(insn, 20, 31);
4056 if (insn & (1 << 12)) {
4057 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4058 } else {
4059 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4060 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4062 } else { /* register */
4063 rs2 = GET_FIELD(insn, 27, 31);
4064 cpu_src2 = gen_load_gpr(dc, rs2);
4065 cpu_tmp0 = get_temp_tl(dc);
4066 if (insn & (1 << 12)) {
4067 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4068 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4069 } else {
4070 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4071 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4072 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4075 gen_store_gpr(dc, rd, cpu_dst);
4076 } else if (xop == 0x27) { /* sra, V9 srax */
4077 cpu_src1 = get_src1(dc, insn);
4078 if (IS_IMM) { /* immediate */
4079 simm = GET_FIELDs(insn, 20, 31);
4080 if (insn & (1 << 12)) {
4081 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4082 } else {
4083 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4084 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4086 } else { /* register */
4087 rs2 = GET_FIELD(insn, 27, 31);
4088 cpu_src2 = gen_load_gpr(dc, rs2);
4089 cpu_tmp0 = get_temp_tl(dc);
4090 if (insn & (1 << 12)) {
4091 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4092 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4093 } else {
4094 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4095 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4096 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4099 gen_store_gpr(dc, rd, cpu_dst);
4100 #endif
4101 } else if (xop < 0x36) {
4102 if (xop < 0x20) {
4103 cpu_src1 = get_src1(dc, insn);
4104 cpu_src2 = get_src2(dc, insn);
4105 switch (xop & ~0x10) {
4106 case 0x0: /* add */
4107 if (xop & 0x10) {
4108 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4109 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4110 dc->cc_op = CC_OP_ADD;
4111 } else {
4112 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4114 break;
4115 case 0x1: /* and */
4116 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4117 if (xop & 0x10) {
4118 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4119 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4120 dc->cc_op = CC_OP_LOGIC;
4122 break;
4123 case 0x2: /* or */
4124 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4125 if (xop & 0x10) {
4126 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4127 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4128 dc->cc_op = CC_OP_LOGIC;
4130 break;
4131 case 0x3: /* xor */
4132 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4133 if (xop & 0x10) {
4134 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4135 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4136 dc->cc_op = CC_OP_LOGIC;
4138 break;
4139 case 0x4: /* sub */
4140 if (xop & 0x10) {
4141 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4142 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4143 dc->cc_op = CC_OP_SUB;
4144 } else {
4145 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4147 break;
4148 case 0x5: /* andn */
4149 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4150 if (xop & 0x10) {
4151 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4152 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4153 dc->cc_op = CC_OP_LOGIC;
4155 break;
4156 case 0x6: /* orn */
4157 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4158 if (xop & 0x10) {
4159 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4160 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4161 dc->cc_op = CC_OP_LOGIC;
4163 break;
4164 case 0x7: /* xorn */
4165 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4166 if (xop & 0x10) {
4167 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4168 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4169 dc->cc_op = CC_OP_LOGIC;
4171 break;
4172 case 0x8: /* addx, V9 addc */
4173 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4174 (xop & 0x10));
4175 break;
4176 #ifdef TARGET_SPARC64
4177 case 0x9: /* V9 mulx */
4178 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4179 break;
4180 #endif
4181 case 0xa: /* umul */
4182 CHECK_IU_FEATURE(dc, MUL);
4183 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4184 if (xop & 0x10) {
4185 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4186 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4187 dc->cc_op = CC_OP_LOGIC;
4189 break;
4190 case 0xb: /* smul */
4191 CHECK_IU_FEATURE(dc, MUL);
4192 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4193 if (xop & 0x10) {
4194 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4195 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4196 dc->cc_op = CC_OP_LOGIC;
4198 break;
4199 case 0xc: /* subx, V9 subc */
4200 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4201 (xop & 0x10));
4202 break;
4203 #ifdef TARGET_SPARC64
4204 case 0xd: /* V9 udivx */
4205 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4206 break;
4207 #endif
4208 case 0xe: /* udiv */
4209 CHECK_IU_FEATURE(dc, DIV);
4210 if (xop & 0x10) {
4211 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4212 cpu_src2);
4213 dc->cc_op = CC_OP_DIV;
4214 } else {
4215 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4216 cpu_src2);
4218 break;
4219 case 0xf: /* sdiv */
4220 CHECK_IU_FEATURE(dc, DIV);
4221 if (xop & 0x10) {
4222 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4223 cpu_src2);
4224 dc->cc_op = CC_OP_DIV;
4225 } else {
4226 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4227 cpu_src2);
4229 break;
4230 default:
4231 goto illegal_insn;
4233 gen_store_gpr(dc, rd, cpu_dst);
4234 } else {
4235 cpu_src1 = get_src1(dc, insn);
4236 cpu_src2 = get_src2(dc, insn);
4237 switch (xop) {
4238 case 0x20: /* taddcc */
4239 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4240 gen_store_gpr(dc, rd, cpu_dst);
4241 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4242 dc->cc_op = CC_OP_TADD;
4243 break;
4244 case 0x21: /* tsubcc */
4245 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4246 gen_store_gpr(dc, rd, cpu_dst);
4247 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4248 dc->cc_op = CC_OP_TSUB;
4249 break;
4250 case 0x22: /* taddcctv */
4251 gen_helper_taddcctv(cpu_dst, cpu_env,
4252 cpu_src1, cpu_src2);
4253 gen_store_gpr(dc, rd, cpu_dst);
4254 dc->cc_op = CC_OP_TADDTV;
4255 break;
4256 case 0x23: /* tsubcctv */
4257 gen_helper_tsubcctv(cpu_dst, cpu_env,
4258 cpu_src1, cpu_src2);
4259 gen_store_gpr(dc, rd, cpu_dst);
4260 dc->cc_op = CC_OP_TSUBTV;
4261 break;
4262 case 0x24: /* mulscc */
4263 update_psr(dc);
4264 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4265 gen_store_gpr(dc, rd, cpu_dst);
4266 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4267 dc->cc_op = CC_OP_ADD;
4268 break;
4269 #ifndef TARGET_SPARC64
4270 case 0x25: /* sll */
4271 if (IS_IMM) { /* immediate */
4272 simm = GET_FIELDs(insn, 20, 31);
4273 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4274 } else { /* register */
4275 cpu_tmp0 = get_temp_tl(dc);
4276 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4277 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4279 gen_store_gpr(dc, rd, cpu_dst);
4280 break;
4281 case 0x26: /* srl */
4282 if (IS_IMM) { /* immediate */
4283 simm = GET_FIELDs(insn, 20, 31);
4284 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4285 } else { /* register */
4286 cpu_tmp0 = get_temp_tl(dc);
4287 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4288 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4290 gen_store_gpr(dc, rd, cpu_dst);
4291 break;
4292 case 0x27: /* sra */
4293 if (IS_IMM) { /* immediate */
4294 simm = GET_FIELDs(insn, 20, 31);
4295 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4296 } else { /* register */
4297 cpu_tmp0 = get_temp_tl(dc);
4298 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4299 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4301 gen_store_gpr(dc, rd, cpu_dst);
4302 break;
4303 #endif
4304 case 0x30:
4306 cpu_tmp0 = get_temp_tl(dc);
4307 switch(rd) {
4308 case 0: /* wry */
4309 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4310 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4311 break;
4312 #ifndef TARGET_SPARC64
4313 case 0x01 ... 0x0f: /* undefined in the
4314 SPARCv8 manual, nop
4315 on the microSPARC
4316 II */
4317 case 0x10 ... 0x1f: /* implementation-dependent
4318 in the SPARCv8
4319 manual, nop on the
4320 microSPARC II */
4321 if ((rd == 0x13) && (dc->def->features &
4322 CPU_FEATURE_POWERDOWN)) {
4323 /* LEON3 power-down */
4324 save_state(dc);
4325 gen_helper_power_down(cpu_env);
4327 break;
4328 #else
4329 case 0x2: /* V9 wrccr */
4330 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4331 gen_helper_wrccr(cpu_env, cpu_tmp0);
4332 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4333 dc->cc_op = CC_OP_FLAGS;
4334 break;
4335 case 0x3: /* V9 wrasi */
4336 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4337 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4338 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4339 offsetof(CPUSPARCState, asi));
4340 /* End TB to notice changed ASI. */
4341 save_state(dc);
4342 gen_op_next_insn();
4343 tcg_gen_exit_tb(0);
4344 dc->is_br = 1;
4345 break;
4346 case 0x6: /* V9 wrfprs */
4347 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4348 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4349 dc->fprs_dirty = 0;
4350 save_state(dc);
4351 gen_op_next_insn();
4352 tcg_gen_exit_tb(0);
4353 dc->is_br = 1;
4354 break;
4355 case 0xf: /* V9 sir, nop if user */
4356 #if !defined(CONFIG_USER_ONLY)
4357 if (supervisor(dc)) {
4358 ; // XXX
4360 #endif
4361 break;
4362 case 0x13: /* Graphics Status */
4363 if (gen_trap_ifnofpu(dc)) {
4364 goto jmp_insn;
4366 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4367 break;
4368 case 0x14: /* Softint set */
4369 if (!supervisor(dc))
4370 goto illegal_insn;
4371 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4372 gen_helper_set_softint(cpu_env, cpu_tmp0);
4373 break;
4374 case 0x15: /* Softint clear */
4375 if (!supervisor(dc))
4376 goto illegal_insn;
4377 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4378 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4379 break;
4380 case 0x16: /* Softint write */
4381 if (!supervisor(dc))
4382 goto illegal_insn;
4383 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4384 gen_helper_write_softint(cpu_env, cpu_tmp0);
4385 break;
4386 case 0x17: /* Tick compare */
4387 #if !defined(CONFIG_USER_ONLY)
4388 if (!supervisor(dc))
4389 goto illegal_insn;
4390 #endif
4392 TCGv_ptr r_tickptr;
4394 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4395 cpu_src2);
4396 r_tickptr = tcg_temp_new_ptr();
4397 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4398 offsetof(CPUSPARCState, tick));
4399 gen_helper_tick_set_limit(r_tickptr,
4400 cpu_tick_cmpr);
4401 tcg_temp_free_ptr(r_tickptr);
4403 break;
4404 case 0x18: /* System tick */
4405 #if !defined(CONFIG_USER_ONLY)
4406 if (!supervisor(dc))
4407 goto illegal_insn;
4408 #endif
4410 TCGv_ptr r_tickptr;
4412 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4413 cpu_src2);
4414 r_tickptr = tcg_temp_new_ptr();
4415 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4416 offsetof(CPUSPARCState, stick));
4417 gen_helper_tick_set_count(r_tickptr,
4418 cpu_tmp0);
4419 tcg_temp_free_ptr(r_tickptr);
4421 break;
4422 case 0x19: /* System tick compare */
4423 #if !defined(CONFIG_USER_ONLY)
4424 if (!supervisor(dc))
4425 goto illegal_insn;
4426 #endif
4428 TCGv_ptr r_tickptr;
4430 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4431 cpu_src2);
4432 r_tickptr = tcg_temp_new_ptr();
4433 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4434 offsetof(CPUSPARCState, stick));
4435 gen_helper_tick_set_limit(r_tickptr,
4436 cpu_stick_cmpr);
4437 tcg_temp_free_ptr(r_tickptr);
4439 break;
4441 case 0x10: /* Performance Control */
4442 case 0x11: /* Performance Instrumentation
4443 Counter */
4444 case 0x12: /* Dispatch Control */
4445 #endif
4446 default:
4447 goto illegal_insn;
4450 break;
4451 #if !defined(CONFIG_USER_ONLY)
4452 case 0x31: /* wrpsr, V9 saved, restored */
4454 if (!supervisor(dc))
4455 goto priv_insn;
4456 #ifdef TARGET_SPARC64
4457 switch (rd) {
4458 case 0:
4459 gen_helper_saved(cpu_env);
4460 break;
4461 case 1:
4462 gen_helper_restored(cpu_env);
4463 break;
4464 case 2: /* UA2005 allclean */
4465 case 3: /* UA2005 otherw */
4466 case 4: /* UA2005 normalw */
4467 case 5: /* UA2005 invalw */
4468 // XXX
4469 default:
4470 goto illegal_insn;
4471 }
4472 #else
4473 cpu_tmp0 = get_temp_tl(dc);
4474 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4475 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4476 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4477 dc->cc_op = CC_OP_FLAGS;
4478 save_state(dc);
4479 gen_op_next_insn();
4480 tcg_gen_exit_tb(0);
4481 dc->is_br = 1;
4482 #endif
4483 }
4484 break;
4485 case 0x32: /* wrwim, V9 wrpr */
4486 {
4487 if (!supervisor(dc))
4488 goto priv_insn;
4489 cpu_tmp0 = get_temp_tl(dc);
4490 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4491 #ifdef TARGET_SPARC64
4492 switch (rd) {
4493 case 0: // tpc
4494 {
4495 TCGv_ptr r_tsptr;
4497 r_tsptr = tcg_temp_new_ptr();
4498 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4499 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4500 offsetof(trap_state, tpc));
4501 tcg_temp_free_ptr(r_tsptr);
4502 }
4503 break;
4504 case 1: // tnpc
4505 {
4506 TCGv_ptr r_tsptr;
4508 r_tsptr = tcg_temp_new_ptr();
4509 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4510 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4511 offsetof(trap_state, tnpc));
4512 tcg_temp_free_ptr(r_tsptr);
4513 }
4514 break;
4515 case 2: // tstate
4516 {
4517 TCGv_ptr r_tsptr;
4519 r_tsptr = tcg_temp_new_ptr();
4520 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4521 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4522 offsetof(trap_state,
4523 tstate));
4524 tcg_temp_free_ptr(r_tsptr);
4525 }
4526 break;
4527 case 3: // tt
4528 {
4529 TCGv_ptr r_tsptr;
4531 r_tsptr = tcg_temp_new_ptr();
4532 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4533 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4534 offsetof(trap_state, tt));
4535 tcg_temp_free_ptr(r_tsptr);
4536 }
4537 break;
4538 case 4: // tick
4539 {
4540 TCGv_ptr r_tickptr;
4542 r_tickptr = tcg_temp_new_ptr();
4543 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4544 offsetof(CPUSPARCState, tick));
4545 gen_helper_tick_set_count(r_tickptr,
4546 cpu_tmp0);
4547 tcg_temp_free_ptr(r_tickptr);
4548 }
4549 break;
4550 case 5: // tba
4551 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4552 break;
4553 case 6: // pstate
4554 save_state(dc);
4555 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4556 dc->npc = DYNAMIC_PC;
4557 break;
4558 case 7: // tl
4559 save_state(dc);
4560 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4561 offsetof(CPUSPARCState, tl));
4562 dc->npc = DYNAMIC_PC;
4563 break;
4564 case 8: // pil
4565 gen_helper_wrpil(cpu_env, cpu_tmp0);
4566 break;
4567 case 9: // cwp
4568 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4569 break;
4570 case 10: // cansave
4571 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4572 offsetof(CPUSPARCState,
4573 cansave));
4574 break;
4575 case 11: // canrestore
4576 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4577 offsetof(CPUSPARCState,
4578 canrestore));
4579 break;
4580 case 12: // cleanwin
4581 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4582 offsetof(CPUSPARCState,
4583 cleanwin));
4584 break;
4585 case 13: // otherwin
4586 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4587 offsetof(CPUSPARCState,
4588 otherwin));
4589 break;
4590 case 14: // wstate
4591 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4592 offsetof(CPUSPARCState,
4593 wstate));
4594 break;
4595 case 16: // UA2005 gl
4596 CHECK_IU_FEATURE(dc, GL);
4597 gen_helper_wrgl(cpu_env, cpu_tmp0);
4598 break;
4599 case 26: // UA2005 strand status
4600 CHECK_IU_FEATURE(dc, HYPV);
4601 if (!hypervisor(dc))
4602 goto priv_insn;
4603 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4604 break;
4605 default:
4606 goto illegal_insn;
4607 }
4608 #else
4609 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4610 if (dc->def->nwindows != 32) {
4611 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4612 (1 << dc->def->nwindows) - 1);
4613 }
4614 #endif
4615 }
4616 break;
4617 case 0x33: /* wrtbr, UA2005 wrhpr */
4618 {
4619 #ifndef TARGET_SPARC64
4620 if (!supervisor(dc))
4621 goto priv_insn;
4622 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4623 #else
4624 CHECK_IU_FEATURE(dc, HYPV);
4625 if (!hypervisor(dc))
4626 goto priv_insn;
4627 cpu_tmp0 = get_temp_tl(dc);
4628 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4629 switch (rd) {
4630 case 0: // hpstate
4631 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4632 offsetof(CPUSPARCState,
4633 hpstate));
4634 save_state(dc);
4635 gen_op_next_insn();
4636 tcg_gen_exit_tb(0);
4637 dc->is_br = 1;
4638 break;
4639 case 1: // htstate
4640 // XXX gen_op_wrhtstate();
4641 break;
4642 case 3: // hintp
4643 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4644 break;
4645 case 5: // htba
4646 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4647 break;
4648 case 31: // hstick_cmpr
4649 {
4650 TCGv_ptr r_tickptr;
4652 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4653 r_tickptr = tcg_temp_new_ptr();
4654 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4655 offsetof(CPUSPARCState, hstick));
4656 gen_helper_tick_set_limit(r_tickptr,
4657 cpu_hstick_cmpr);
4658 tcg_temp_free_ptr(r_tickptr);
4659 }
4660 break;
4661 case 6: // hver readonly
4662 default:
4663 goto illegal_insn;
4664 }
4665 #endif
4666 }
4667 break;
4668 #endif
4669 #ifdef TARGET_SPARC64
4670 case 0x2c: /* V9 movcc */
4671 {
4672 int cc = GET_FIELD_SP(insn, 11, 12);
4673 int cond = GET_FIELD_SP(insn, 14, 17);
4674 DisasCompare cmp;
4675 TCGv dst;
4677 if (insn & (1 << 18)) {
4678 if (cc == 0) {
4679 gen_compare(&cmp, 0, cond, dc);
4680 } else if (cc == 2) {
4681 gen_compare(&cmp, 1, cond, dc);
4682 } else {
4683 goto illegal_insn;
4684 }
4685 } else {
4686 gen_fcompare(&cmp, cc, cond);
4687 }
4689 /* The get_src2 above loaded the normal 13-bit
4690 immediate field, not the 11-bit field we have
4691 in movcc. But it did handle the reg case. */
4692 if (IS_IMM) {
4693 simm = GET_FIELD_SPs(insn, 0, 10);
4694 tcg_gen_movi_tl(cpu_src2, simm);
4695 }
4697 dst = gen_load_gpr(dc, rd);
4698 tcg_gen_movcond_tl(cmp.cond, dst,
4699 cmp.c1, cmp.c2,
4700 cpu_src2, dst);
4701 free_compare(&cmp);
4702 gen_store_gpr(dc, rd, dst);
4703 break;
4704 }
4705 case 0x2d: /* V9 sdivx */
4706 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4707 gen_store_gpr(dc, rd, cpu_dst);
4708 break;
4709 case 0x2e: /* V9 popc */
4710 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4711 gen_store_gpr(dc, rd, cpu_dst);
4712 break;
4713 case 0x2f: /* V9 movr */
4714 {
4715 int cond = GET_FIELD_SP(insn, 10, 12);
4716 DisasCompare cmp;
4717 TCGv dst;
4719 gen_compare_reg(&cmp, cond, cpu_src1);
4721 /* The get_src2 above loaded the normal 13-bit
4722 immediate field, not the 10-bit field we have
4723 in movr. But it did handle the reg case. */
4724 if (IS_IMM) {
4725 simm = GET_FIELD_SPs(insn, 0, 9);
4726 tcg_gen_movi_tl(cpu_src2, simm);
4727 }
4729 dst = gen_load_gpr(dc, rd);
4730 tcg_gen_movcond_tl(cmp.cond, dst,
4731 cmp.c1, cmp.c2,
4732 cpu_src2, dst);
4733 free_compare(&cmp);
4734 gen_store_gpr(dc, rd, dst);
4735 break;
4736 }
4737 #endif
4738 default:
4739 goto illegal_insn;
4740 }
4741 }
4742 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4743 #ifdef TARGET_SPARC64
4744 int opf = GET_FIELD_SP(insn, 5, 13);
4745 rs1 = GET_FIELD(insn, 13, 17);
4746 rs2 = GET_FIELD(insn, 27, 31);
4747 if (gen_trap_ifnofpu(dc)) {
4748 goto jmp_insn;
4749 }
4751 switch (opf) {
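/* Judging from the call sites, gen_edge's last three arguments
   encode the variant: element width (8/16/32), whether condition
   codes are set (the "cc" forms), and whether the little-endian
   form is selected (the "l" forms). */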
4752 case 0x000: /* VIS I edge8cc */
4753 CHECK_FPU_FEATURE(dc, VIS1);
4754 cpu_src1 = gen_load_gpr(dc, rs1);
4755 cpu_src2 = gen_load_gpr(dc, rs2);
4756 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4757 gen_store_gpr(dc, rd, cpu_dst);
4758 break;
4759 case 0x001: /* VIS II edge8n */
4760 CHECK_FPU_FEATURE(dc, VIS2);
4761 cpu_src1 = gen_load_gpr(dc, rs1);
4762 cpu_src2 = gen_load_gpr(dc, rs2);
4763 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4764 gen_store_gpr(dc, rd, cpu_dst);
4765 break;
4766 case 0x002: /* VIS I edge8lcc */
4767 CHECK_FPU_FEATURE(dc, VIS1);
4768 cpu_src1 = gen_load_gpr(dc, rs1);
4769 cpu_src2 = gen_load_gpr(dc, rs2);
4770 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4771 gen_store_gpr(dc, rd, cpu_dst);
4772 break;
4773 case 0x003: /* VIS II edge8ln */
4774 CHECK_FPU_FEATURE(dc, VIS2);
4775 cpu_src1 = gen_load_gpr(dc, rs1);
4776 cpu_src2 = gen_load_gpr(dc, rs2);
4777 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4778 gen_store_gpr(dc, rd, cpu_dst);
4779 break;
4780 case 0x004: /* VIS I edge16cc */
4781 CHECK_FPU_FEATURE(dc, VIS1);
4782 cpu_src1 = gen_load_gpr(dc, rs1);
4783 cpu_src2 = gen_load_gpr(dc, rs2);
4784 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4785 gen_store_gpr(dc, rd, cpu_dst);
4786 break;
4787 case 0x005: /* VIS II edge16n */
4788 CHECK_FPU_FEATURE(dc, VIS2);
4789 cpu_src1 = gen_load_gpr(dc, rs1);
4790 cpu_src2 = gen_load_gpr(dc, rs2);
4791 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4792 gen_store_gpr(dc, rd, cpu_dst);
4793 break;
4794 case 0x006: /* VIS I edge16lcc */
4795 CHECK_FPU_FEATURE(dc, VIS1);
4796 cpu_src1 = gen_load_gpr(dc, rs1);
4797 cpu_src2 = gen_load_gpr(dc, rs2);
4798 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4799 gen_store_gpr(dc, rd, cpu_dst);
4800 break;
4801 case 0x007: /* VIS II edge16ln */
4802 CHECK_FPU_FEATURE(dc, VIS2);
4803 cpu_src1 = gen_load_gpr(dc, rs1);
4804 cpu_src2 = gen_load_gpr(dc, rs2);
4805 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4806 gen_store_gpr(dc, rd, cpu_dst);
4807 break;
4808 case 0x008: /* VIS I edge32cc */
4809 CHECK_FPU_FEATURE(dc, VIS1);
4810 cpu_src1 = gen_load_gpr(dc, rs1);
4811 cpu_src2 = gen_load_gpr(dc, rs2);
4812 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4813 gen_store_gpr(dc, rd, cpu_dst);
4814 break;
4815 case 0x009: /* VIS II edge32n */
4816 CHECK_FPU_FEATURE(dc, VIS2);
4817 cpu_src1 = gen_load_gpr(dc, rs1);
4818 cpu_src2 = gen_load_gpr(dc, rs2);
4819 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4820 gen_store_gpr(dc, rd, cpu_dst);
4821 break;
4822 case 0x00a: /* VIS I edge32lcc */
4823 CHECK_FPU_FEATURE(dc, VIS1);
4824 cpu_src1 = gen_load_gpr(dc, rs1);
4825 cpu_src2 = gen_load_gpr(dc, rs2);
4826 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4827 gen_store_gpr(dc, rd, cpu_dst);
4828 break;
4829 case 0x00b: /* VIS II edge32ln */
4830 CHECK_FPU_FEATURE(dc, VIS2);
4831 cpu_src1 = gen_load_gpr(dc, rs1);
4832 cpu_src2 = gen_load_gpr(dc, rs2);
4833 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4834 gen_store_gpr(dc, rd, cpu_dst);
4835 break;
4836 case 0x010: /* VIS I array8 */
4837 CHECK_FPU_FEATURE(dc, VIS1);
4838 cpu_src1 = gen_load_gpr(dc, rs1);
4839 cpu_src2 = gen_load_gpr(dc, rs2);
4840 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4841 gen_store_gpr(dc, rd, cpu_dst);
4842 break;
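/* array16 and array32 below reuse the array8 helper and shift the
   result left by 1 or 2 to scale the address by the element size. */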
4843 case 0x012: /* VIS I array16 */
4844 CHECK_FPU_FEATURE(dc, VIS1);
4845 cpu_src1 = gen_load_gpr(dc, rs1);
4846 cpu_src2 = gen_load_gpr(dc, rs2);
4847 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4848 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4849 gen_store_gpr(dc, rd, cpu_dst);
4850 break;
4851 case 0x014: /* VIS I array32 */
4852 CHECK_FPU_FEATURE(dc, VIS1);
4853 cpu_src1 = gen_load_gpr(dc, rs1);
4854 cpu_src2 = gen_load_gpr(dc, rs2);
4855 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4856 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4857 gen_store_gpr(dc, rd, cpu_dst);
4858 break;
4859 case 0x018: /* VIS I alignaddr */
4860 CHECK_FPU_FEATURE(dc, VIS1);
4861 cpu_src1 = gen_load_gpr(dc, rs1);
4862 cpu_src2 = gen_load_gpr(dc, rs2);
4863 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4864 gen_store_gpr(dc, rd, cpu_dst);
4865 break;
4866 case 0x01a: /* VIS I alignaddrl */
4867 CHECK_FPU_FEATURE(dc, VIS1);
4868 cpu_src1 = gen_load_gpr(dc, rs1);
4869 cpu_src2 = gen_load_gpr(dc, rs2);
4870 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4871 gen_store_gpr(dc, rd, cpu_dst);
4872 break;
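/* bmask deposits the addition result into the upper 32 bits of GSR,
   where bshuffle (a gen_gsr_fop_DDD operation below) consumes it. */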
4873 case 0x019: /* VIS II bmask */
4874 CHECK_FPU_FEATURE(dc, VIS2);
4875 cpu_src1 = gen_load_gpr(dc, rs1);
4876 cpu_src2 = gen_load_gpr(dc, rs2);
4877 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4878 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4879 gen_store_gpr(dc, rd, cpu_dst);
4880 break;
4881 case 0x020: /* VIS I fcmple16 */
4882 CHECK_FPU_FEATURE(dc, VIS1);
4883 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4884 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4885 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4886 gen_store_gpr(dc, rd, cpu_dst);
4887 break;
4888 case 0x022: /* VIS I fcmpne16 */
4889 CHECK_FPU_FEATURE(dc, VIS1);
4890 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4891 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4892 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4893 gen_store_gpr(dc, rd, cpu_dst);
4894 break;
4895 case 0x024: /* VIS I fcmple32 */
4896 CHECK_FPU_FEATURE(dc, VIS1);
4897 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4898 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4899 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4900 gen_store_gpr(dc, rd, cpu_dst);
4901 break;
4902 case 0x026: /* VIS I fcmpne32 */
4903 CHECK_FPU_FEATURE(dc, VIS1);
4904 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4905 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4906 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4907 gen_store_gpr(dc, rd, cpu_dst);
4908 break;
4909 case 0x028: /* VIS I fcmpgt16 */
4910 CHECK_FPU_FEATURE(dc, VIS1);
4911 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4912 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4913 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4914 gen_store_gpr(dc, rd, cpu_dst);
4915 break;
4916 case 0x02a: /* VIS I fcmpeq16 */
4917 CHECK_FPU_FEATURE(dc, VIS1);
4918 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4919 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4920 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4921 gen_store_gpr(dc, rd, cpu_dst);
4922 break;
4923 case 0x02c: /* VIS I fcmpgt32 */
4924 CHECK_FPU_FEATURE(dc, VIS1);
4925 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4926 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4927 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4928 gen_store_gpr(dc, rd, cpu_dst);
4929 break;
4930 case 0x02e: /* VIS I fcmpeq32 */
4931 CHECK_FPU_FEATURE(dc, VIS1);
4932 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4933 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4934 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4935 gen_store_gpr(dc, rd, cpu_dst);
4936 break;
4937 case 0x031: /* VIS I fmul8x16 */
4938 CHECK_FPU_FEATURE(dc, VIS1);
4939 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4940 break;
4941 case 0x033: /* VIS I fmul8x16au */
4942 CHECK_FPU_FEATURE(dc, VIS1);
4943 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4944 break;
4945 case 0x035: /* VIS I fmul8x16al */
4946 CHECK_FPU_FEATURE(dc, VIS1);
4947 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4948 break;
4949 case 0x036: /* VIS I fmul8sux16 */
4950 CHECK_FPU_FEATURE(dc, VIS1);
4951 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4952 break;
4953 case 0x037: /* VIS I fmul8ulx16 */
4954 CHECK_FPU_FEATURE(dc, VIS1);
4955 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4956 break;
4957 case 0x038: /* VIS I fmuld8sux16 */
4958 CHECK_FPU_FEATURE(dc, VIS1);
4959 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4960 break;
4961 case 0x039: /* VIS I fmuld8ulx16 */
4962 CHECK_FPU_FEATURE(dc, VIS1);
4963 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4964 break;
4965 case 0x03a: /* VIS I fpack32 */
4966 CHECK_FPU_FEATURE(dc, VIS1);
4967 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4968 break;
4969 case 0x03b: /* VIS I fpack16 */
4970 CHECK_FPU_FEATURE(dc, VIS1);
4971 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4972 cpu_dst_32 = gen_dest_fpr_F(dc);
4973 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4974 gen_store_fpr_F(dc, rd, cpu_dst_32);
4975 break;
4976 case 0x03d: /* VIS I fpackfix */
4977 CHECK_FPU_FEATURE(dc, VIS1);
4978 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4979 cpu_dst_32 = gen_dest_fpr_F(dc);
4980 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4981 gen_store_fpr_F(dc, rd, cpu_dst_32);
4982 break;
4983 case 0x03e: /* VIS I pdist */
4984 CHECK_FPU_FEATURE(dc, VIS1);
4985 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4986 break;
4987 case 0x048: /* VIS I faligndata */
4988 CHECK_FPU_FEATURE(dc, VIS1);
4989 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4990 break;
4991 case 0x04b: /* VIS I fpmerge */
4992 CHECK_FPU_FEATURE(dc, VIS1);
4993 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4994 break;
4995 case 0x04c: /* VIS II bshuffle */
4996 CHECK_FPU_FEATURE(dc, VIS2);
4997 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4998 break;
4999 case 0x04d: /* VIS I fexpand */
5000 CHECK_FPU_FEATURE(dc, VIS1);
5001 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5002 break;
5003 case 0x050: /* VIS I fpadd16 */
5004 CHECK_FPU_FEATURE(dc, VIS1);
5005 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5006 break;
5007 case 0x051: /* VIS I fpadd16s */
5008 CHECK_FPU_FEATURE(dc, VIS1);
5009 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5010 break;
5011 case 0x052: /* VIS I fpadd32 */
5012 CHECK_FPU_FEATURE(dc, VIS1);
5013 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5014 break;
5015 case 0x053: /* VIS I fpadd32s */
5016 CHECK_FPU_FEATURE(dc, VIS1);
5017 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5018 break;
5019 case 0x054: /* VIS I fpsub16 */
5020 CHECK_FPU_FEATURE(dc, VIS1);
5021 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5022 break;
5023 case 0x055: /* VIS I fpsub16s */
5024 CHECK_FPU_FEATURE(dc, VIS1);
5025 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5026 break;
5027 case 0x056: /* VIS I fpsub32 */
5028 CHECK_FPU_FEATURE(dc, VIS1);
5029 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5030 break;
5031 case 0x057: /* VIS I fpsub32s */
5032 CHECK_FPU_FEATURE(dc, VIS1);
5033 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5034 break;
5035 case 0x060: /* VIS I fzero */
5036 CHECK_FPU_FEATURE(dc, VIS1);
5037 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5038 tcg_gen_movi_i64(cpu_dst_64, 0);
5039 gen_store_fpr_D(dc, rd, cpu_dst_64);
5040 break;
5041 case 0x061: /* VIS I fzeros */
5042 CHECK_FPU_FEATURE(dc, VIS1);
5043 cpu_dst_32 = gen_dest_fpr_F(dc);
5044 tcg_gen_movi_i32(cpu_dst_32, 0);
5045 gen_store_fpr_F(dc, rd, cpu_dst_32);
5046 break;
5047 case 0x062: /* VIS I fnor */
5048 CHECK_FPU_FEATURE(dc, VIS1);
5049 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5050 break;
5051 case 0x063: /* VIS I fnors */
5052 CHECK_FPU_FEATURE(dc, VIS1);
5053 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5054 break;
5055 case 0x064: /* VIS I fandnot2 */
5056 CHECK_FPU_FEATURE(dc, VIS1);
5057 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5058 break;
5059 case 0x065: /* VIS I fandnot2s */
5060 CHECK_FPU_FEATURE(dc, VIS1);
5061 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5062 break;
5063 case 0x066: /* VIS I fnot2 */
5064 CHECK_FPU_FEATURE(dc, VIS1);
5065 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5066 break;
5067 case 0x067: /* VIS I fnot2s */
5068 CHECK_FPU_FEATURE(dc, VIS1);
5069 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5070 break;
5071 case 0x068: /* VIS I fandnot1 */
5072 CHECK_FPU_FEATURE(dc, VIS1);
5073 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5074 break;
5075 case 0x069: /* VIS I fandnot1s */
5076 CHECK_FPU_FEATURE(dc, VIS1);
5077 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5078 break;
5079 case 0x06a: /* VIS I fnot1 */
5080 CHECK_FPU_FEATURE(dc, VIS1);
5081 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5082 break;
5083 case 0x06b: /* VIS I fnot1s */
5084 CHECK_FPU_FEATURE(dc, VIS1);
5085 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5086 break;
5087 case 0x06c: /* VIS I fxor */
5088 CHECK_FPU_FEATURE(dc, VIS1);
5089 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5090 break;
5091 case 0x06d: /* VIS I fxors */
5092 CHECK_FPU_FEATURE(dc, VIS1);
5093 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5094 break;
5095 case 0x06e: /* VIS I fnand */
5096 CHECK_FPU_FEATURE(dc, VIS1);
5097 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5098 break;
5099 case 0x06f: /* VIS I fnands */
5100 CHECK_FPU_FEATURE(dc, VIS1);
5101 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5102 break;
5103 case 0x070: /* VIS I fand */
5104 CHECK_FPU_FEATURE(dc, VIS1);
5105 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5106 break;
5107 case 0x071: /* VIS I fands */
5108 CHECK_FPU_FEATURE(dc, VIS1);
5109 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5110 break;
5111 case 0x072: /* VIS I fxnor */
5112 CHECK_FPU_FEATURE(dc, VIS1);
5113 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5114 break;
5115 case 0x073: /* VIS I fxnors */
5116 CHECK_FPU_FEATURE(dc, VIS1);
5117 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5118 break;
5119 case 0x074: /* VIS I fsrc1 */
5120 CHECK_FPU_FEATURE(dc, VIS1);
5121 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5122 gen_store_fpr_D(dc, rd, cpu_src1_64);
5123 break;
5124 case 0x075: /* VIS I fsrc1s */
5125 CHECK_FPU_FEATURE(dc, VIS1);
5126 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5127 gen_store_fpr_F(dc, rd, cpu_src1_32);
5128 break;
5129 case 0x076: /* VIS I fornot2 */
5130 CHECK_FPU_FEATURE(dc, VIS1);
5131 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5132 break;
5133 case 0x077: /* VIS I fornot2s */
5134 CHECK_FPU_FEATURE(dc, VIS1);
5135 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5136 break;
5137 case 0x078: /* VIS I fsrc2 */
5138 CHECK_FPU_FEATURE(dc, VIS1);
5139 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5140 gen_store_fpr_D(dc, rd, cpu_src1_64);
5141 break;
5142 case 0x079: /* VIS I fsrc2s */
5143 CHECK_FPU_FEATURE(dc, VIS1);
5144 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5145 gen_store_fpr_F(dc, rd, cpu_src1_32);
5146 break;
5147 case 0x07a: /* VIS I fornot1 */
5148 CHECK_FPU_FEATURE(dc, VIS1);
5149 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5150 break;
5151 case 0x07b: /* VIS I fornot1s */
5152 CHECK_FPU_FEATURE(dc, VIS1);
5153 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5154 break;
5155 case 0x07c: /* VIS I for */
5156 CHECK_FPU_FEATURE(dc, VIS1);
5157 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5158 break;
5159 case 0x07d: /* VIS I fors */
5160 CHECK_FPU_FEATURE(dc, VIS1);
5161 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5162 break;
5163 case 0x07e: /* VIS I fone */
5164 CHECK_FPU_FEATURE(dc, VIS1);
5165 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5166 tcg_gen_movi_i64(cpu_dst_64, -1);
5167 gen_store_fpr_D(dc, rd, cpu_dst_64);
5168 break;
5169 case 0x07f: /* VIS I fones */
5170 CHECK_FPU_FEATURE(dc, VIS1);
5171 cpu_dst_32 = gen_dest_fpr_F(dc);
5172 tcg_gen_movi_i32(cpu_dst_32, -1);
5173 gen_store_fpr_F(dc, rd, cpu_dst_32);
5174 break;
5175 case 0x080: /* VIS I shutdown */
5176 case 0x081: /* VIS II siam */
5177 // XXX
5178 goto illegal_insn;
5179 default:
5180 goto illegal_insn;
5181 }
5182 #else
5183 goto ncp_insn;
5184 #endif
5185 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5186 #ifdef TARGET_SPARC64
5187 goto illegal_insn;
5188 #else
5189 goto ncp_insn;
5190 #endif
5191 #ifdef TARGET_SPARC64
5192 } else if (xop == 0x39) { /* V9 return */
5193 save_state(dc);
5194 cpu_src1 = get_src1(dc, insn);
5195 cpu_tmp0 = get_temp_tl(dc);
5196 if (IS_IMM) { /* immediate */
5197 simm = GET_FIELDs(insn, 19, 31);
5198 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5199 } else { /* register */
5200 rs2 = GET_FIELD(insn, 27, 31);
5201 if (rs2) {
5202 cpu_src2 = gen_load_gpr(dc, rs2);
5203 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5204 } else {
5205 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5206 }
5207 }
5208 gen_helper_restore(cpu_env);
5209 gen_mov_pc_npc(dc);
5210 gen_check_align(cpu_tmp0, 3);
5211 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5212 dc->npc = DYNAMIC_PC;
5213 goto jmp_insn;
5214 #endif
5215 } else {
5216 cpu_src1 = get_src1(dc, insn);
5217 cpu_tmp0 = get_temp_tl(dc);
5218 if (IS_IMM) { /* immediate */
5219 simm = GET_FIELDs(insn, 19, 31);
5220 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5221 } else { /* register */
5222 rs2 = GET_FIELD(insn, 27, 31);
5223 if (rs2) {
5224 cpu_src2 = gen_load_gpr(dc, rs2);
5225 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5226 } else {
5227 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5228 }
5229 }
5230 switch (xop) {
5231 case 0x38: /* jmpl */
5232 {
5233 TCGv t = gen_dest_gpr(dc, rd);
5234 tcg_gen_movi_tl(t, dc->pc);
5235 gen_store_gpr(dc, rd, t);
5237 gen_mov_pc_npc(dc);
5238 gen_check_align(cpu_tmp0, 3);
5239 gen_address_mask(dc, cpu_tmp0);
5240 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5241 dc->npc = DYNAMIC_PC;
5242 }
5243 goto jmp_insn;
5244 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5245 case 0x39: /* rett, V9 return */
5246 {
5247 if (!supervisor(dc))
5248 goto priv_insn;
5249 gen_mov_pc_npc(dc);
5250 gen_check_align(cpu_tmp0, 3);
5251 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5252 dc->npc = DYNAMIC_PC;
5253 gen_helper_rett(cpu_env);
5254 }
5255 goto jmp_insn;
5256 #endif
5257 case 0x3b: /* flush */
5258 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5259 goto unimp_flush;
5260 /* nop */
5261 break;
5262 case 0x3c: /* save */
5263 gen_helper_save(cpu_env);
5264 gen_store_gpr(dc, rd, cpu_tmp0);
5265 break;
5266 case 0x3d: /* restore */
5267 gen_helper_restore(cpu_env);
5268 gen_store_gpr(dc, rd, cpu_tmp0);
5269 break;
5270 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5271 case 0x3e: /* V9 done/retry */
5272 {
5273 switch (rd) {
5274 case 0:
5275 if (!supervisor(dc))
5276 goto priv_insn;
5277 dc->npc = DYNAMIC_PC;
5278 dc->pc = DYNAMIC_PC;
5279 gen_helper_done(cpu_env);
5280 goto jmp_insn;
5281 case 1:
5282 if (!supervisor(dc))
5283 goto priv_insn;
5284 dc->npc = DYNAMIC_PC;
5285 dc->pc = DYNAMIC_PC;
5286 gen_helper_retry(cpu_env);
5287 goto jmp_insn;
5288 default:
5289 goto illegal_insn;
5290 }
5291 }
5292 break;
5293 #endif
5294 default:
5295 goto illegal_insn;
5296 }
5297 }
5298 break;
5299 }
5300 break;
5301 case 3: /* load/store instructions */
5302 {
5303 unsigned int xop = GET_FIELD(insn, 7, 12);
5304 /* ??? gen_address_mask prevents us from using a source
5305 register directly. Always generate a temporary. */
5306 TCGv cpu_addr = get_temp_tl(dc);
5308 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5309 if (xop == 0x3c || xop == 0x3e) {
5310 /* V9 casa/casxa : no offset */
5311 } else if (IS_IMM) { /* immediate */
5312 simm = GET_FIELDs(insn, 19, 31);
5313 if (simm != 0) {
5314 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5315 }
5316 } else { /* register */
5317 rs2 = GET_FIELD(insn, 27, 31);
5318 if (rs2 != 0) {
5319 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5320 }
5321 }
5322 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5323 (xop > 0x17 && xop <= 0x1d ) ||
5324 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5325 TCGv cpu_val = gen_dest_gpr(dc, rd);
5327 switch (xop) {
5328 case 0x0: /* ld, V9 lduw, load unsigned word */
5329 gen_address_mask(dc, cpu_addr);
5330 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5331 break;
5332 case 0x1: /* ldub, load unsigned byte */
5333 gen_address_mask(dc, cpu_addr);
5334 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5335 break;
5336 case 0x2: /* lduh, load unsigned halfword */
5337 gen_address_mask(dc, cpu_addr);
5338 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5339 break;
5340 case 0x3: /* ldd, load double word */
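/* ldd requires an even rd; the 64-bit load is split so that bits
   63:32 land in rd (stored by the common gen_store_gpr after the
   switch) and bits 31:0 in rd + 1. */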
5341 if (rd & 1)
5342 goto illegal_insn;
5343 else {
5344 TCGv_i64 t64;
5346 gen_address_mask(dc, cpu_addr);
5347 t64 = tcg_temp_new_i64();
5348 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5349 tcg_gen_trunc_i64_tl(cpu_val, t64);
5350 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5351 gen_store_gpr(dc, rd + 1, cpu_val);
5352 tcg_gen_shri_i64(t64, t64, 32);
5353 tcg_gen_trunc_i64_tl(cpu_val, t64);
5354 tcg_temp_free_i64(t64);
5355 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5356 }
5357 break;
5358 case 0x9: /* ldsb, load signed byte */
5359 gen_address_mask(dc, cpu_addr);
5360 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5361 break;
5362 case 0xa: /* ldsh, load signed halfword */
5363 gen_address_mask(dc, cpu_addr);
5364 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5365 break;
5366 case 0xd: /* ldstub */
5367 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5368 break;
5369 case 0x0f:
5370 /* swap, swap register with memory. Also atomically */
5371 CHECK_IU_FEATURE(dc, SWAP);
5372 cpu_src1 = gen_load_gpr(dc, rd);
5373 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5374 dc->mem_idx, MO_TEUL);
5375 break;
5376 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5377 case 0x10: /* lda, V9 lduwa, load word alternate */
5378 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5379 break;
5380 case 0x11: /* lduba, load unsigned byte alternate */
5381 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5382 break;
5383 case 0x12: /* lduha, load unsigned halfword alternate */
5384 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5385 break;
5386 case 0x13: /* ldda, load double word alternate */
5387 if (rd & 1) {
5388 goto illegal_insn;
5389 }
5390 gen_ldda_asi(dc, cpu_addr, insn, rd);
5391 goto skip_move;
5392 case 0x19: /* ldsba, load signed byte alternate */
5393 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5394 break;
5395 case 0x1a: /* ldsha, load signed halfword alternate */
5396 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5397 break;
5398 case 0x1d: /* ldstuba -- XXX: should be atomic */
5399 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5400 break;
5401 case 0x1f: /* swapa, swap reg with alt. memory. Also
5402 atomically */
5403 CHECK_IU_FEATURE(dc, SWAP);
5404 cpu_src1 = gen_load_gpr(dc, rd);
5405 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5406 break;
5408 #ifndef TARGET_SPARC64
5409 case 0x30: /* ldc */
5410 case 0x31: /* ldcsr */
5411 case 0x33: /* lddc */
5412 goto ncp_insn;
5413 #endif
5414 #endif
5415 #ifdef TARGET_SPARC64
5416 case 0x08: /* V9 ldsw */
5417 gen_address_mask(dc, cpu_addr);
5418 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5419 break;
5420 case 0x0b: /* V9 ldx */
5421 gen_address_mask(dc, cpu_addr);
5422 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5423 break;
5424 case 0x18: /* V9 ldswa */
5425 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5426 break;
5427 case 0x1b: /* V9 ldxa */
5428 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5429 break;
5430 case 0x2d: /* V9 prefetch, no effect */
5431 goto skip_move;
5432 case 0x30: /* V9 ldfa */
5433 if (gen_trap_ifnofpu(dc)) {
5434 goto jmp_insn;
5435 }
5436 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5437 gen_update_fprs_dirty(dc, rd);
5438 goto skip_move;
5439 case 0x33: /* V9 lddfa */
5440 if (gen_trap_ifnofpu(dc)) {
5441 goto jmp_insn;
5442 }
5443 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5444 gen_update_fprs_dirty(dc, DFPREG(rd));
5445 goto skip_move;
5446 case 0x3d: /* V9 prefetcha, no effect */
5447 goto skip_move;
5448 case 0x32: /* V9 ldqfa */
5449 CHECK_FPU_FEATURE(dc, FLOAT128);
5450 if (gen_trap_ifnofpu(dc)) {
5451 goto jmp_insn;
5452 }
5453 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5454 gen_update_fprs_dirty(dc, QFPREG(rd));
5455 goto skip_move;
5456 #endif
5457 default:
5458 goto illegal_insn;
5459 }
5460 gen_store_gpr(dc, rd, cpu_val);
5461 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5462 skip_move: ;
5463 #endif
5464 } else if (xop >= 0x20 && xop < 0x24) {
5465 if (gen_trap_ifnofpu(dc)) {
5466 goto jmp_insn;
5467 }
5468 switch (xop) {
5469 case 0x20: /* ldf, load fpreg */
5470 gen_address_mask(dc, cpu_addr);
5471 cpu_dst_32 = gen_dest_fpr_F(dc);
5472 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5473 dc->mem_idx, MO_TEUL);
5474 gen_store_fpr_F(dc, rd, cpu_dst_32);
5475 break;
5476 case 0x21: /* ldfsr, V9 ldxfsr */
5477 #ifdef TARGET_SPARC64
5478 gen_address_mask(dc, cpu_addr);
5479 if (rd == 1) {
5480 TCGv_i64 t64 = tcg_temp_new_i64();
5481 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5482 dc->mem_idx, MO_TEQ);
5483 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5484 tcg_temp_free_i64(t64);
5485 break;
5486 }
5487 #endif
5488 cpu_dst_32 = get_temp_i32(dc);
5489 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5490 dc->mem_idx, MO_TEUL);
5491 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5492 break;
5493 case 0x22: /* ldqf, load quad fpreg */
5494 CHECK_FPU_FEATURE(dc, FLOAT128);
5495 gen_address_mask(dc, cpu_addr);
5496 cpu_src1_64 = tcg_temp_new_i64();
5497 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5498 MO_TEQ | MO_ALIGN_4);
5499 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5500 cpu_src2_64 = tcg_temp_new_i64();
5501 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5502 MO_TEQ | MO_ALIGN_4);
5503 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5504 tcg_temp_free_i64(cpu_src1_64);
5505 tcg_temp_free_i64(cpu_src2_64);
5506 break;
5507 case 0x23: /* lddf, load double fpreg */
5508 gen_address_mask(dc, cpu_addr);
5509 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5510 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5511 MO_TEQ | MO_ALIGN_4);
5512 gen_store_fpr_D(dc, rd, cpu_dst_64);
5513 break;
5514 default:
5515 goto illegal_insn;
5516 }
5517 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5518 xop == 0xe || xop == 0x1e) {
5519 TCGv cpu_val = gen_load_gpr(dc, rd);
5521 switch (xop) {
5522 case 0x4: /* st, store word */
5523 gen_address_mask(dc, cpu_addr);
5524 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5525 break;
5526 case 0x5: /* stb, store byte */
5527 gen_address_mask(dc, cpu_addr);
5528 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5529 break;
5530 case 0x6: /* sth, store halfword */
5531 gen_address_mask(dc, cpu_addr);
5532 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5533 break;
5534 case 0x7: /* std, store double word */
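/* std mirrors ldd: rd + 1 supplies the low word and rd the high
   word, concatenated into a single 64-bit store. */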
5535 if (rd & 1)
5536 goto illegal_insn;
5537 else {
5538 TCGv_i64 t64;
5539 TCGv lo;
5541 gen_address_mask(dc, cpu_addr);
5542 lo = gen_load_gpr(dc, rd + 1);
5543 t64 = tcg_temp_new_i64();
5544 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5545 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5546 tcg_temp_free_i64(t64);
5547 }
5548 break;
5549 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5550 case 0x14: /* sta, V9 stwa, store word alternate */
5551 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5552 break;
5553 case 0x15: /* stba, store byte alternate */
5554 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5555 break;
5556 case 0x16: /* stha, store halfword alternate */
5557 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5558 break;
5559 case 0x17: /* stda, store double word alternate */
5560 if (rd & 1) {
5561 goto illegal_insn;
5562 }
5563 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5564 break;
5565 #endif
5566 #ifdef TARGET_SPARC64
5567 case 0x0e: /* V9 stx */
5568 gen_address_mask(dc, cpu_addr);
5569 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5570 break;
5571 case 0x1e: /* V9 stxa */
5572 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5573 break;
5574 #endif
5575 default:
5576 goto illegal_insn;
5577 }
5578 } else if (xop > 0x23 && xop < 0x28) {
5579 if (gen_trap_ifnofpu(dc)) {
5580 goto jmp_insn;
5581 }
5582 switch (xop) {
5583 case 0x24: /* stf, store fpreg */
5584 gen_address_mask(dc, cpu_addr);
5585 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5586 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5587 dc->mem_idx, MO_TEUL);
5588 break;
5589 case 0x25: /* stfsr, V9 stxfsr */
5590 {
5591 #ifdef TARGET_SPARC64
5592 gen_address_mask(dc, cpu_addr);
5593 if (rd == 1) {
5594 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5595 break;
5596 }
5597 #endif
5598 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5599 }
5600 break;
5601 case 0x26:
5602 #ifdef TARGET_SPARC64
5603 /* V9 stqf, store quad fpreg */
5604 CHECK_FPU_FEATURE(dc, FLOAT128);
5605 gen_address_mask(dc, cpu_addr);
5606 /* ??? While stqf only requires 4-byte alignment, it is
5607 legal for the cpu to signal the unaligned exception.
5608 The OS trap handler is then required to fix it up.
5609 For qemu, this avoids having to probe the second page
5610 before performing the first write. */
5611 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5612 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5613 dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5614 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5615 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5616 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5617 dc->mem_idx, MO_TEQ);
5618 break;
5619 #else /* !TARGET_SPARC64 */
5620 /* stdfq, store floating point queue */
5621 #if defined(CONFIG_USER_ONLY)
5622 goto illegal_insn;
5623 #else
5624 if (!supervisor(dc))
5625 goto priv_insn;
5626 if (gen_trap_ifnofpu(dc)) {
5627 goto jmp_insn;
5628 }
5629 goto nfq_insn;
5630 #endif
5631 #endif
5632 case 0x27: /* stdf, store double fpreg */
5633 gen_address_mask(dc, cpu_addr);
5634 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5635 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5636 MO_TEQ | MO_ALIGN_4);
5637 break;
5638 default:
5639 goto illegal_insn;
5640 }
5641 } else if (xop > 0x33 && xop < 0x3f) {
5642 switch (xop) {
5643 #ifdef TARGET_SPARC64
5644 case 0x34: /* V9 stfa */
5645 if (gen_trap_ifnofpu(dc)) {
5646 goto jmp_insn;
5647 }
5648 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5649 break;
5650 case 0x36: /* V9 stqfa */
5651 {
5652 CHECK_FPU_FEATURE(dc, FLOAT128);
5653 if (gen_trap_ifnofpu(dc)) {
5654 goto jmp_insn;
5655 }
5656 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5657 }
5658 break;
5659 case 0x37: /* V9 stdfa */
5660 if (gen_trap_ifnofpu(dc)) {
5661 goto jmp_insn;
5662 }
5663 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5664 break;
5665 case 0x3e: /* V9 casxa */
5666 rs2 = GET_FIELD(insn, 27, 31);
5667 cpu_src2 = gen_load_gpr(dc, rs2);
5668 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5669 break;
5670 #else
5671 case 0x34: /* stc */
5672 case 0x35: /* stcsr */
5673 case 0x36: /* stdcq */
5674 case 0x37: /* stdc */
5675 goto ncp_insn;
5676 #endif
5677 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5678 case 0x3c: /* V9 or LEON3 casa */
5679 #ifndef TARGET_SPARC64
5680 CHECK_IU_FEATURE(dc, CASA);
5681 #endif
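/* For casa, r[rs2] is the comparison value; r[rd] supplies the
   value stored on a match and always receives the old memory
   contents. */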
5682 rs2 = GET_FIELD(insn, 27, 31);
5683 cpu_src2 = gen_load_gpr(dc, rs2);
5684 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5685 break;
5686 #endif
5687 default:
5688 goto illegal_insn;
5689 }
5690 } else {
5691 goto illegal_insn;
5692 }
5693 }
5694 break;
5695 }
5696 /* default case for non jump instructions */
5697 if (dc->npc == DYNAMIC_PC) {
5698 dc->pc = DYNAMIC_PC;
5699 gen_op_next_insn();
5700 } else if (dc->npc == JUMP_PC) {
5701 /* we can do a static jump */
5702 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5703 dc->is_br = 1;
5704 } else {
5705 dc->pc = dc->npc;
5706 dc->npc = dc->npc + 4;
5707 }
5708 jmp_insn:
5709 goto egress;
5710 illegal_insn:
5711 gen_exception(dc, TT_ILL_INSN);
5712 goto egress;
5713 unimp_flush:
5714 gen_exception(dc, TT_UNIMP_FLUSH);
5715 goto egress;
5716 #if !defined(CONFIG_USER_ONLY)
5717 priv_insn:
5718 gen_exception(dc, TT_PRIV_INSN);
5719 goto egress;
5720 #endif
5721 nfpu_insn:
5722 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5723 goto egress;
5724 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5725 nfq_insn:
5726 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5727 goto egress;
5728 #endif
5729 #ifndef TARGET_SPARC64
5730 ncp_insn:
5731 gen_exception(dc, TT_NCP_INSN);
5732 goto egress;
5733 #endif
5734 egress:
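/* Release any temporaries handed out by get_temp_i32/get_temp_tl
   while translating this instruction. */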
5735 if (dc->n_t32 != 0) {
5736 int i;
5737 for (i = dc->n_t32 - 1; i >= 0; --i) {
5738 tcg_temp_free_i32(dc->t32[i]);
5739 }
5740 dc->n_t32 = 0;
5741 }
5742 if (dc->n_ttl != 0) {
5743 int i;
5744 for (i = dc->n_ttl - 1; i >= 0; --i) {
5745 tcg_temp_free(dc->ttl[i]);
5746 }
5747 dc->n_ttl = 0;
5748 }
5749 }
5751 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5752 {
5753 SPARCCPU *cpu = sparc_env_get_cpu(env);
5754 CPUState *cs = CPU(cpu);
5755 target_ulong pc_start, last_pc;
5756 DisasContext dc1, *dc = &dc1;
5757 int num_insns;
5758 int max_insns;
5759 unsigned int insn;
5761 memset(dc, 0, sizeof(DisasContext));
5762 dc->tb = tb;
5763 pc_start = tb->pc;
5764 dc->pc = pc_start;
5765 last_pc = dc->pc;
5766 dc->npc = (target_ulong) tb->cs_base;
5767 dc->cc_op = CC_OP_DYNAMIC;
5768 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5769 dc->def = env->def;
5770 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5771 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5772 dc->singlestep = (cs->singlestep_enabled || singlestep);
5773 #ifndef CONFIG_USER_ONLY
5774 dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
5775 #endif
5776 #ifdef TARGET_SPARC64
5777 dc->fprs_dirty = 0;
5778 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5779 #ifndef CONFIG_USER_ONLY
5780 dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
5781 #endif
5782 #endif
5784 num_insns = 0;
5785 max_insns = tb->cflags & CF_COUNT_MASK;
5786 if (max_insns == 0) {
5787 max_insns = CF_COUNT_MASK;
5788 }
5789 if (max_insns > TCG_MAX_INSNS) {
5790 max_insns = TCG_MAX_INSNS;
5791 }
5793 gen_tb_start(tb);
5794 do {
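/* An npc tagged with JUMP_PC means the previous insn was a
   conditional delayed branch: cpu_cond holds the condition,
   jump_pc[0] the taken target and jump_pc[1] the fall-through. */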
5795 if (dc->npc & JUMP_PC) {
5796 assert(dc->jump_pc[1] == dc->pc + 4);
5797 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5798 } else {
5799 tcg_gen_insn_start(dc->pc, dc->npc);
5800 }
5801 num_insns++;
5802 last_pc = dc->pc;
5804 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5805 if (dc->pc != pc_start) {
5806 save_state(dc);
5807 }
5808 gen_helper_debug(cpu_env);
5809 tcg_gen_exit_tb(0);
5810 dc->is_br = 1;
5811 goto exit_gen_loop;
5812 }
5814 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5815 gen_io_start();
5816 }
5818 insn = cpu_ldl_code(env, dc->pc);
5820 disas_sparc_insn(dc, insn);
5822 if (dc->is_br)
5823 break;
5824 /* if the next PC is different, we abort now */
5825 if (dc->pc != (last_pc + 4))
5826 break;
5827 /* if we reach a page boundary, we stop generation so that the
5828 PC of a TT_TFAULT exception is always in the right page */
5829 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5830 break;
5831 /* if single step mode, we generate only one instruction and
5832 generate an exception */
5833 if (dc->singlestep) {
5834 break;
5835 }
5836 } while (!tcg_op_buf_full() &&
5837 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5838 num_insns < max_insns);
5840 exit_gen_loop:
5841 if (tb->cflags & CF_LAST_IO) {
5842 gen_io_end();
5843 }
5844 if (!dc->is_br) {
5845 if (dc->pc != DYNAMIC_PC &&
5846 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5847 /* static PC and NPC: we can use direct chaining */
5848 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5849 } else {
5850 if (dc->pc != DYNAMIC_PC) {
5851 tcg_gen_movi_tl(cpu_pc, dc->pc);
5852 }
5853 save_npc(dc);
5854 tcg_gen_exit_tb(0);
5855 }
5856 }
5857 gen_tb_end(tb, num_insns);
5859 tb->size = last_pc + 4 - pc_start;
5860 tb->icount = num_insns;
5862 #ifdef DEBUG_DISAS
5863 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5864 && qemu_log_in_addr_range(pc_start)) {
5865 qemu_log_lock();
5866 qemu_log("--------------\n");
5867 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5868 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5869 qemu_log("\n");
5870 qemu_log_unlock();
5871 }
5872 #endif
5873 }
5875 void gen_intermediate_code_init(CPUSPARCState *env)
5876 {
5877 static int inited;
5878 static const char gregnames[32][4] = {
5879 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5880 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5881 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5882 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5884 static const char fregnames[32][4] = {
5885 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5886 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5887 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5888 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5891 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5892 #ifdef TARGET_SPARC64
5893 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5894 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5895 #else
5896 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5897 #endif
5898 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5899 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5900 };
5902 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5903 #ifdef TARGET_SPARC64
5904 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5905 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5906 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5907 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5908 "hstick_cmpr" },
5909 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5910 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5911 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5912 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5913 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5914 #endif
5915 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5916 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5917 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5918 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5919 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5920 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5921 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5922 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5923 #ifndef CONFIG_USER_ONLY
5924 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5925 #endif
5926 };
5928 unsigned int i;
5930 /* init various static tables */
5931 if (inited) {
5932 return;
5933 }
5934 inited = 1;
5936 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5937 tcg_ctx.tcg_env = cpu_env;
5939 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5940 offsetof(CPUSPARCState, regwptr),
5941 "regwptr");
5943 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5944 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5945 }
5947 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5948 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5949 }
5951 TCGV_UNUSED(cpu_regs[0]);
5952 for (i = 1; i < 8; ++i) {
5953 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5954 offsetof(CPUSPARCState, gregs[i]),
5955 gregnames[i]);
5956 }
5958 for (i = 8; i < 32; ++i) {
5959 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5960 (i - 8) * sizeof(target_ulong),
5961 gregnames[i]);
5962 }
5964 for (i = 0; i < TARGET_DPREGS; i++) {
5965 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5966 offsetof(CPUSPARCState, fpr[i]),
5967 fregnames[i]);
5968 }
5969 }
5971 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5972 target_ulong *data)
5973 {
5974 target_ulong pc = data[0];
5975 target_ulong npc = data[1];
5977 env->pc = pc;
5978 if (npc == DYNAMIC_PC) {
5979 /* dynamic NPC: already stored */
5980 } else if (npc & JUMP_PC) {
5981 /* jump PC: use 'cond' and the jump targets of the translation */
5982 if (env->cond) {
5983 env->npc = npc & ~3;
5984 } else {
5985 env->npc = pc + 4;
5986 }
5987 } else {
5988 env->npc = npc;