/* qemu/ar7.git: target/sparc/translate.c */
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "asi.h"
#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
    bool singlestep;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;
typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
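/* Illustrative example (added, not in the original source): GET_FIELD
   counts bits from the MSB, so for an instruction word 0x40000001,
   GET_FIELD(insn, 0, 1) == (insn >> 30) & 3 == 1, the two-bit "op"
   field of a CALL.  GET_FIELD_SP uses the manuals' LSB-first numbering,
   so GET_FIELD_SP(insn, 0, 1) reads the two least-significant bits. */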
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
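/* Illustrative example (added): on TARGET_SPARC64 the low encoding bit
   selects the upper register bank, so DFPREG(1) == 32 (%d32) while
   DFPREG(6) == 6 (%d6); cpu_fpr[] holds one 64-bit value per double
   register, indexed by DFPREG(r) / 2. */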
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
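/* Illustrative example (added): sign_extend(0x1fff, 13) == -1; this is
   how the 13-bit simm13 immediate selected by IS_IMM is widened. */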
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
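/* Illustrative example (added): with PSTATE.AM set (AM_CHECK true), an
   effective address of 0xffffffff80001000 is masked down to 0x80001000
   before the memory access is emitted. */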
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
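/* Note (added): returning (tb | tb_num) from tcg_gen_exit_tb lets the TCG
   runtime patch the goto_tb branch so consecutive TBs are chained
   directly; exit_tb(0) always falls back to the main execution loop. */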
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
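/* Worked example (added): after addcc computed 0xffffffff + 2, cpu_cc_dst
   holds 1 and cpu_cc_src holds 0xffffffff, so the 32-bit unsigned compare
   dst < src above recovers carry == 1. */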
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
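/* Note (added): this models one V8 multiply step (MULScc): the LSB of %y
   decides whether rs2 contributes to the sum, %y shifts right taking bit 0
   of rs1 at the top, and the running sum shifts right with N ^ V entering
   as the new sign bit. */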
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
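/* Illustrative example (added): for "bne,a target" gen_branch_a emits two
   chained exits: taken -> (npc, target), not taken -> (npc + 4, npc + 8),
   i.e. the delay-slot instruction is annulled on the not-taken path. */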
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
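/* Note (added): gen_compare below tries to fold the condition back into a
   single TCG setcond on the saved operands; e.g. after subcc, "bleu"
   becomes a TCG_COND_LEU compare of cc_src with cc_src2, and only the
   overflow conditions fall back to gen_helper_compute_psr(). */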
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, TCGMemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
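/* Note (added): LDSTUB is modelled as an atomic xchg that stores 0xff,
   matching its architectural role as a test-and-set lock primitive. */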
/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;
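/* Illustrative example (added): on sparc32, "lda [%o0] 0xa, %o1"
   (ASI_USERDATA) resolves via get_asi() below to roughly
   { GET_ASI_DIRECT, 0x0a, MMU_USER_IDX, MO_TEUL }, so the access is
   emitted as a plain qemu_ld in the user MMU index. */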
static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY:     /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL:     /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }

    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:            /* Bypass */
        case ASI_REAL_IO:         /* Bypass, non-cacheable */
        case ASI_REAL_L:          /* Bypass LE */
        case ASI_REAL_IO_L:       /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:      /* Real address, twinx */
        case ASI_TWINX_REAL_L:    /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:   /* Nucleus */
        case ASI_NL:  /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:   /* As if user primary */
        case ASI_AIUPL:  /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:   /* As if user secondary */
        case ASI_AIUSL:  /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:   /* Secondary */
        case ASI_SL:  /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:   /* Primary */
        case ASI_PL:  /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }

        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }

        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
        /* fall through */
#endif
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_const_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }

            tcg_temp_free(saddr);
            tcg_temp_free(daddr);
            tcg_temp_free(four);
            tcg_temp_free_i32(tmp);
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2399 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2400 TCGv addr, int insn)
2402 DisasASI da = get_asi(dc, insn, MO_TEUL);
2404 switch (da.type) {
2405 case GET_ASI_EXCP:
2406 break;
2407 case GET_ASI_DIRECT:
2408 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2409 break;
2410 default:
2411 /* ??? Should be DAE_invalid_asi. */
2412 gen_exception(dc, TT_DATA_ACCESS);
2413 break;
2417 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2418 int insn, int rd)
2420 DisasASI da = get_asi(dc, insn, MO_TEUL);
2421 TCGv oldv;
2423 switch (da.type) {
2424 case GET_ASI_EXCP:
2425 return;
2426 case GET_ASI_DIRECT:
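/* casa semantics: oldv = *addr; if (oldv == cmpv) *addr = r[rd];
   r[rd] = oldv -- which maps directly onto TCG's atomic cmpxchg. */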
2427 oldv = tcg_temp_new();
2428 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2429 da.mem_idx, da.memop);
2430 gen_store_gpr(dc, rd, oldv);
2431 tcg_temp_free(oldv);
2432 break;
2433 default:
2434 /* ??? Should be DAE_invalid_asi. */
2435 gen_exception(dc, TT_DATA_ACCESS);
2436 break;
2440 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2442 DisasASI da = get_asi(dc, insn, MO_UB);
2444 switch (da.type) {
2445 case GET_ASI_EXCP:
2446 break;
2447 case GET_ASI_DIRECT:
2448 gen_ldstub(dc, dst, addr, da.mem_idx);
2449 break;
2450 default:
2451 /* ??? Should be DAE_invalid_asi. */
2452 gen_exception(dc, TT_DATA_ACCESS);
2453 break;
2456 #endif
2458 #ifdef TARGET_SPARC64
2459 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2460 int insn, int size, int rd)
2462 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2463 TCGv_i32 d32;
2464 TCGv_i64 d64;
2466 switch (da.type) {
2467 case GET_ASI_EXCP:
2468 break;
2470 case GET_ASI_DIRECT:
2471 gen_address_mask(dc, addr);
2472 switch (size) {
2473 case 4:
2474 d32 = gen_dest_fpr_F(dc);
2475 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2476 gen_store_fpr_F(dc, rd, d32);
2477 break;
2478 case 8:
2479 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2480 da.memop | MO_ALIGN_4);
2481 break;
2482 case 16:
2483 d64 = tcg_temp_new_i64();
2484 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2485 tcg_gen_addi_tl(addr, addr, 8);
2486 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2487 da.memop | MO_ALIGN_4);
2488 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2489 tcg_temp_free_i64(d64);
2490 break;
2491 default:
2492 g_assert_not_reached();
2494 break;
2496 case GET_ASI_BLOCK:
2497 /* Valid for lddfa on aligned registers only. */
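/* A block transfer moves 64 bytes, i.e. eight double registers,
   hence the requirement that rd be a multiple of 8. */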
2498 if (size == 8 && (rd & 7) == 0) {
2499 TCGMemOp memop;
2500 TCGv eight;
2501 int i;
2503 gen_address_mask(dc, addr);
2505 /* The first operation checks required alignment. */
2506 memop = da.memop | MO_ALIGN_64;
2507 eight = tcg_const_tl(8);
2508 for (i = 0; ; ++i) {
2509 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2510 da.mem_idx, memop);
2511 if (i == 7) {
2512 break;
2514 tcg_gen_add_tl(addr, addr, eight);
2515 memop = da.memop;
2517 tcg_temp_free(eight);
2518 } else {
2519 gen_exception(dc, TT_ILL_INSN);
2521 break;
2523 case GET_ASI_SHORT:
2524 /* Valid for lddfa only. */
2525 if (size == 8) {
2526 gen_address_mask(dc, addr);
2527 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2528 } else {
2529 gen_exception(dc, TT_ILL_INSN);
2531 break;
2533 default:
2535 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2536 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2538 save_state(dc);
2539 /* According to the table in the UA2011 manual, the only
2540 other asis that are valid for ldfa/lddfa/ldqfa are
2541 the NO_FAULT asis. We still need a helper for these,
2542 but we can just use the integer asi helper for them. */
2543 switch (size) {
2544 case 4:
2545 d64 = tcg_temp_new_i64();
2546 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2547 d32 = gen_dest_fpr_F(dc);
2548 tcg_gen_extrl_i64_i32(d32, d64);
2549 tcg_temp_free_i64(d64);
2550 gen_store_fpr_F(dc, rd, d32);
2551 break;
2552 case 8:
2553 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2554 break;
2555 case 16:
2556 d64 = tcg_temp_new_i64();
2557 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2558 tcg_gen_addi_tl(addr, addr, 8);
2559 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2560 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2561 tcg_temp_free_i64(d64);
2562 break;
2563 default:
2564 g_assert_not_reached();
2566 tcg_temp_free_i32(r_mop);
2567 tcg_temp_free_i32(r_asi);
2569 break;
2573 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2574 int insn, int size, int rd)
2576 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2577 TCGv_i32 d32;
2579 switch (da.type) {
2580 case GET_ASI_EXCP:
2581 break;
2583 case GET_ASI_DIRECT:
2584 gen_address_mask(dc, addr);
2585 switch (size) {
2586 case 4:
2587 d32 = gen_load_fpr_F(dc, rd);
2588 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2589 break;
2590 case 8:
2591 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2592 da.memop | MO_ALIGN_4);
2593 break;
2594 case 16:
2595 /* Only 4-byte alignment required. However, it is legal for the
2596 cpu to signal the alignment fault, and the OS trap handler is
2597 required to fix it up. Requiring 16-byte alignment here avoids
2598 having to probe the second page before performing the first
2599 write. */
2600 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2601 da.memop | MO_ALIGN_16);
2602 tcg_gen_addi_tl(addr, addr, 8);
2603 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2604 break;
2605 default:
2606 g_assert_not_reached();
2608 break;
2610 case GET_ASI_BLOCK:
2611 /* Valid for stdfa on aligned registers only. */
2612 if (size == 8 && (rd & 7) == 0) {
2613 TCGMemOp memop;
2614 TCGv eight;
2615 int i;
2617 gen_address_mask(dc, addr);
2619 /* The first operation checks required alignment. */
2620 memop = da.memop | MO_ALIGN_64;
2621 eight = tcg_const_tl(8);
2622 for (i = 0; ; ++i) {
2623 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2624 da.mem_idx, memop);
2625 if (i == 7) {
2626 break;
2628 tcg_gen_add_tl(addr, addr, eight);
2629 memop = da.memop;
2631 tcg_temp_free(eight);
2632 } else {
2633 gen_exception(dc, TT_ILL_INSN);
2635 break;
2637 case GET_ASI_SHORT:
2638 /* Valid for stdfa only. */
2639 if (size == 8) {
2640 gen_address_mask(dc, addr);
2641 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2642 } else {
2643 gen_exception(dc, TT_ILL_INSN);
2645 break;
2647 default:
2648 /* According to the table in the UA2011 manual, the only
2649 other asis that are valid for stfa/stdfa/stqfa are
2650 the PST* asis, which aren't currently handled. */
2651 gen_exception(dc, TT_ILL_INSN);
2652 break;
2656 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2658 DisasASI da = get_asi(dc, insn, MO_TEQ);
2659 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2660 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2662 switch (da.type) {
2663 case GET_ASI_EXCP:
2664 return;
2666 case GET_ASI_DTWINX:
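/* A twin load is two 8-byte accesses; only the first carries the
   16-byte alignment check. */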
2667 gen_address_mask(dc, addr);
2668 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2669 tcg_gen_addi_tl(addr, addr, 8);
2670 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2671 break;
2673 case GET_ASI_DIRECT:
2675 TCGv_i64 tmp = tcg_temp_new_i64();
2677 gen_address_mask(dc, addr);
2678 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2680 /* Note that LE ldda acts as if each 32-bit register
2681 result is byte swapped. Having just performed one
2682 64-bit bswap, we need now to swap the writebacks. */
2683 if ((da.memop & MO_BSWAP) == MO_TE) {
2684 tcg_gen_extr32_i64(lo, hi, tmp);
2685 } else {
2686 tcg_gen_extr32_i64(hi, lo, tmp);
2688 tcg_temp_free_i64(tmp);
2690 break;
2692 default:
2693 /* ??? In theory we've handled all of the ASIs that are valid
2694 for ldda, and this should raise DAE_invalid_asi. However,
2695 real hardware allows others. This can be seen with e.g.
2696 FreeBSD 10.3 wrt ASI_IC_TAG. */
2698 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2699 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2700 TCGv_i64 tmp = tcg_temp_new_i64();
2702 save_state(dc);
2703 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2704 tcg_temp_free_i32(r_asi);
2705 tcg_temp_free_i32(r_mop);
2707 /* See above. */
2708 if ((da.memop & MO_BSWAP) == MO_TE) {
2709 tcg_gen_extr32_i64(lo, hi, tmp);
2710 } else {
2711 tcg_gen_extr32_i64(hi, lo, tmp);
2713 tcg_temp_free_i64(tmp);
2715 break;
2718 gen_store_gpr(dc, rd, hi);
2719 gen_store_gpr(dc, rd + 1, lo);
2722 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2723 int insn, int rd)
2725 DisasASI da = get_asi(dc, insn, MO_TEQ);
2726 TCGv lo = gen_load_gpr(dc, rd + 1);
2728 switch (da.type) {
2729 case GET_ASI_EXCP:
2730 break;
2732 case GET_ASI_DTWINX:
2733 gen_address_mask(dc, addr);
2734 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2735 tcg_gen_addi_tl(addr, addr, 8);
2736 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2737 break;
2739 case GET_ASI_DIRECT:
2741 TCGv_i64 t64 = tcg_temp_new_i64();
2743 /* Note that LE stda acts as if each 32-bit register result is
2744 byte swapped. We will perform one 64-bit LE store, so now
2745 we must swap the order of the construction. */
2746 if ((da.memop & MO_BSWAP) == MO_TE) {
2747 tcg_gen_concat32_i64(t64, lo, hi);
2748 } else {
2749 tcg_gen_concat32_i64(t64, hi, lo);
2751 gen_address_mask(dc, addr);
2752 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2753 tcg_temp_free_i64(t64);
2755 break;
2757 default:
2758 /* ??? In theory we've handled all of the ASIs that are valid
2759 for stda, and this should raise DAE_invalid_asi. */
2761 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2762 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2763 TCGv_i64 t64 = tcg_temp_new_i64();
2765 /* See above. */
2766 if ((da.memop & MO_BSWAP) == MO_TE) {
2767 tcg_gen_concat32_i64(t64, lo, hi);
2768 } else {
2769 tcg_gen_concat32_i64(t64, hi, lo);
2772 save_state(dc);
2773 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2774 tcg_temp_free_i32(r_mop);
2775 tcg_temp_free_i32(r_asi);
2776 tcg_temp_free_i64(t64);
2778 break;
2782 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2783 int insn, int rd)
2785 DisasASI da = get_asi(dc, insn, MO_TEQ);
2786 TCGv oldv;
2788 switch (da.type) {
2789 case GET_ASI_EXCP:
2790 return;
2791 case GET_ASI_DIRECT:
2792 oldv = tcg_temp_new();
2793 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2794 da.mem_idx, da.memop);
2795 gen_store_gpr(dc, rd, oldv);
2796 tcg_temp_free(oldv);
2797 break;
2798 default:
2799 /* ??? Should be DAE_invalid_asi. */
2800 gen_exception(dc, TT_DATA_ACCESS);
2801 break;
2805 #elif !defined(CONFIG_USER_ONLY)
2806 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2808 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2809 whereby "rd + 1" elicits "error: array subscript is above array".
2810 Since we have already asserted that rd is even, the semantics
2811 are unchanged. */
2812 TCGv lo = gen_dest_gpr(dc, rd | 1);
2813 TCGv hi = gen_dest_gpr(dc, rd);
2814 TCGv_i64 t64 = tcg_temp_new_i64();
2815 DisasASI da = get_asi(dc, insn, MO_TEQ);
2817 switch (da.type) {
2818 case GET_ASI_EXCP:
2819 tcg_temp_free_i64(t64);
2820 return;
2821 case GET_ASI_DIRECT:
2822 gen_address_mask(dc, addr);
2823 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2824 break;
2825 default:
2827 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2828 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2830 save_state(dc);
2831 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2832 tcg_temp_free_i32(r_mop);
2833 tcg_temp_free_i32(r_asi);
2835 break;
2838 tcg_gen_extr_i64_i32(lo, hi, t64);
2839 tcg_temp_free_i64(t64);
2840 gen_store_gpr(dc, rd | 1, lo);
2841 gen_store_gpr(dc, rd, hi);
2844 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2845 int insn, int rd)
2847 DisasASI da = get_asi(dc, insn, MO_TEQ);
2848 TCGv lo = gen_load_gpr(dc, rd + 1);
2849 TCGv_i64 t64 = tcg_temp_new_i64();
2851 tcg_gen_concat_tl_i64(t64, lo, hi);
2853 switch (da.type) {
2854 case GET_ASI_EXCP:
2855 break;
2856 case GET_ASI_DIRECT:
2857 gen_address_mask(dc, addr);
2858 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2859 break;
2860 case GET_ASI_BFILL:
2861 /* Store 32 bytes of T64 to ADDR. */
2862 /* ??? The original qemu code suggests 8-byte alignment, dropping
2863 the low bits, but the only place I can see this used is in the
2864 Linux kernel with 32 byte alignment, which would make more sense
2865 as a cacheline-style operation. */
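/* The loop below expands to four 8-byte stores of the same value,
   covering the 32-byte block. */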
2867 TCGv d_addr = tcg_temp_new();
2868 TCGv eight = tcg_const_tl(8);
2869 int i;
2871 tcg_gen_andi_tl(d_addr, addr, -8);
2872 for (i = 0; i < 32; i += 8) {
2873 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2874 tcg_gen_add_tl(d_addr, d_addr, eight);
2877 tcg_temp_free(d_addr);
2878 tcg_temp_free(eight);
2880 break;
2881 default:
2883 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2884 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2886 save_state(dc);
2887 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2888 tcg_temp_free_i32(r_mop);
2889 tcg_temp_free_i32(r_asi);
2891 break;
2894 tcg_temp_free_i64(t64);
2896 #endif
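/* Operand helpers for format-3 instructions: rs1 is insn bits 18:14,
   rs2 is bits 4:0, and bit 13 (the i field) selects a 13-bit signed
   immediate in place of rs2. */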
2898 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2900 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2901 return gen_load_gpr(dc, rs1);
2904 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2906 if (IS_IMM) { /* immediate */
2907 target_long simm = GET_FIELDs(insn, 19, 31);
2908 TCGv t = get_temp_tl(dc);
2909 tcg_gen_movi_tl(t, simm);
2910 return t;
2911 } else { /* register */
2912 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2913 return gen_load_gpr(dc, rs2);
2917 #ifdef TARGET_SPARC64
2918 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2920 TCGv_i32 c32, zero, dst, s1, s2;
2922 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2923 or fold the comparison down to 32 bits and use movcond_i32. Choose
2924 the latter. */
2925 c32 = tcg_temp_new_i32();
2926 if (cmp->is_bool) {
2927 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2928 } else {
2929 TCGv_i64 c64 = tcg_temp_new_i64();
2930 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2931 tcg_gen_extrl_i64_i32(c32, c64);
2932 tcg_temp_free_i64(c64);
2935 s1 = gen_load_fpr_F(dc, rs);
2936 s2 = gen_load_fpr_F(dc, rd);
2937 dst = gen_dest_fpr_F(dc);
2938 zero = tcg_const_i32(0);
2940 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2942 tcg_temp_free_i32(c32);
2943 tcg_temp_free_i32(zero);
2944 gen_store_fpr_F(dc, rd, dst);
2947 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2949 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2950 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2951 gen_load_fpr_D(dc, rs),
2952 gen_load_fpr_D(dc, rd));
2953 gen_store_fpr_D(dc, rd, dst);
2956 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2958 int qd = QFPREG(rd);
2959 int qs = QFPREG(rs);
2961 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2962 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2963 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2964 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2966 gen_update_fprs_dirty(dc, qd);
2969 #ifndef CONFIG_USER_ONLY
2970 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2972 TCGv_i32 r_tl = tcg_temp_new_i32();
2974 /* load env->tl into r_tl */
2975 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2977 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2978 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2980 /* calculate offset to current trap state from env->ts, reuse r_tl */
2981 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2982 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2984 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2986 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2987 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2988 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2989 tcg_temp_free_ptr(r_tl_tmp);
2992 tcg_temp_free_i32(r_tl);
2994 #endif
2996 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2997 int width, bool cc, bool left)
2999 TCGv lo1, lo2, t1, t2;
3000 uint64_t amask, tabl, tabr;
3001 int shift, imask, omask;
3003 if (cc) {
3004 tcg_gen_mov_tl(cpu_cc_src, s1);
3005 tcg_gen_mov_tl(cpu_cc_src2, s2);
3006 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3007 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3008 dc->cc_op = CC_OP_SUB;
3011 /* Theory of operation: there are two tables, left and right (not to
3012 be confused with the left and right versions of the opcode). These
3013 are indexed by the low 3 bits of the inputs. To make things "easy",
3014 these tables are loaded into two constants, TABL and TABR below.
3015 The operation index = (input & imask) << shift calculates the index
3016 into the constant, while val = (table >> index) & omask calculates
3017 the value we're looking for. */
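/* Worked example: for width 8 with LEFT set, index = (s1 & 7) << 3,
   so an input whose low bits are 001 selects (tabl >> 8) & 0xff,
   i.e. the mask 0xfe. */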
3018 switch (width) {
3019 case 8:
3020 imask = 0x7;
3021 shift = 3;
3022 omask = 0xff;
3023 if (left) {
3024 tabl = 0x80c0e0f0f8fcfeffULL;
3025 tabr = 0xff7f3f1f0f070301ULL;
3026 } else {
3027 tabl = 0x0103070f1f3f7fffULL;
3028 tabr = 0xfffefcf8f0e0c080ULL;
3030 break;
3031 case 16:
3032 imask = 0x6;
3033 shift = 1;
3034 omask = 0xf;
3035 if (left) {
3036 tabl = 0x8cef;
3037 tabr = 0xf731;
3038 } else {
3039 tabl = 0x137f;
3040 tabr = 0xfec8;
3042 break;
3043 case 32:
3044 imask = 0x4;
3045 shift = 0;
3046 omask = 0x3;
3047 if (left) {
3048 tabl = (2 << 2) | 3;
3049 tabr = (3 << 2) | 1;
3050 } else {
3051 tabl = (1 << 2) | 3;
3052 tabr = (3 << 2) | 2;
3054 break;
3055 default:
3056 abort();
3059 lo1 = tcg_temp_new();
3060 lo2 = tcg_temp_new();
3061 tcg_gen_andi_tl(lo1, s1, imask);
3062 tcg_gen_andi_tl(lo2, s2, imask);
3063 tcg_gen_shli_tl(lo1, lo1, shift);
3064 tcg_gen_shli_tl(lo2, lo2, shift);
3066 t1 = tcg_const_tl(tabl);
3067 t2 = tcg_const_tl(tabr);
3068 tcg_gen_shr_tl(lo1, t1, lo1);
3069 tcg_gen_shr_tl(lo2, t2, lo2);
3070 tcg_gen_andi_tl(dst, lo1, omask);
3071 tcg_gen_andi_tl(lo2, lo2, omask);
3073 amask = -8;
3074 if (AM_CHECK(dc)) {
3075 amask &= 0xffffffffULL;
3077 tcg_gen_andi_tl(s1, s1, amask);
3078 tcg_gen_andi_tl(s2, s2, amask);
3080 /* We want to compute
3081 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3082 We've already done dst = lo1, so this reduces to
3083 dst &= (s1 == s2 ? -1 : lo2)
3084 Which we perform by
3085 lo2 |= -(s1 == s2)
3086 dst &= lo2
3088 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3089 tcg_gen_neg_tl(t1, t1);
3090 tcg_gen_or_tl(lo2, lo2, t1);
3091 tcg_gen_and_tl(dst, dst, lo2);
3093 tcg_temp_free(lo1);
3094 tcg_temp_free(lo2);
3095 tcg_temp_free(t1);
3096 tcg_temp_free(t2);
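/* alignaddress: dst = (s1 + s2) & ~7, and the low three bits of the
   sum (negated when LEFT is set, which corresponds to the little-endian
   variant) are deposited into GSR.align for faligndata below. */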
3099 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3101 TCGv tmp = tcg_temp_new();
3103 tcg_gen_add_tl(tmp, s1, s2);
3104 tcg_gen_andi_tl(dst, tmp, -8);
3105 if (left) {
3106 tcg_gen_neg_tl(tmp, tmp);
3108 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3110 tcg_temp_free(tmp);
3113 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3115 TCGv t1, t2, shift;
3117 t1 = tcg_temp_new();
3118 t2 = tcg_temp_new();
3119 shift = tcg_temp_new();
3121 tcg_gen_andi_tl(shift, gsr, 7);
3122 tcg_gen_shli_tl(shift, shift, 3);
3123 tcg_gen_shl_tl(t1, s1, shift);
3125 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3126 shift of (up to 63) followed by a constant shift of 1. */
3127 tcg_gen_xori_tl(shift, shift, 63);
3128 tcg_gen_shr_tl(t2, s2, shift);
3129 tcg_gen_shri_tl(t2, t2, 1);
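/* Sanity check of the decomposition: with GSR.align == 0, t1 = s1
   unshifted and t2 = (s2 >> 63) >> 1 == 0, so dst = s1 as required. */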
3131 tcg_gen_or_tl(dst, t1, t2);
3133 tcg_temp_free(t1);
3134 tcg_temp_free(t2);
3135 tcg_temp_free(shift);
3137 #endif
3139 #define CHECK_IU_FEATURE(dc, FEATURE) \
3140 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3141 goto illegal_insn;
3142 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3143 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3144 goto nfpu_insn;
3146 /* before an instruction, dc->pc must be static */
3147 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3149 unsigned int opc, rs1, rs2, rd;
3150 TCGv cpu_src1, cpu_src2;
3151 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3152 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3153 target_long simm;
3155 opc = GET_FIELD(insn, 0, 1);
3156 rd = GET_FIELD(insn, 2, 6);
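/* Instruction word layout: op = insn[31:30] selects the format,
   rd = insn[29:25] names the destination register. */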
3158 switch (opc) {
3159 case 0: /* branches/sethi */
3161 unsigned int xop = GET_FIELD(insn, 7, 9);
3162 int32_t target;
3163 switch (xop) {
3164 #ifdef TARGET_SPARC64
3165 case 0x1: /* V9 BPcc */
3167 int cc;
3169 target = GET_FIELD_SP(insn, 0, 18);
3170 target = sign_extend(target, 19);
3171 target <<= 2;
3172 cc = GET_FIELD_SP(insn, 20, 21);
3173 if (cc == 0)
3174 do_branch(dc, target, insn, 0);
3175 else if (cc == 2)
3176 do_branch(dc, target, insn, 1);
3177 else
3178 goto illegal_insn;
3179 goto jmp_insn;
3181 case 0x3: /* V9 BPr */
3183 target = GET_FIELD_SP(insn, 0, 13) |
3184 (GET_FIELD_SP(insn, 20, 21) << 14);
3185 target = sign_extend(target, 16);
3186 target <<= 2;
3187 cpu_src1 = get_src1(dc, insn);
3188 do_branch_reg(dc, target, insn, cpu_src1);
3189 goto jmp_insn;
3191 case 0x5: /* V9 FBPcc */
3193 int cc = GET_FIELD_SP(insn, 20, 21);
3194 if (gen_trap_ifnofpu(dc)) {
3195 goto jmp_insn;
3197 target = GET_FIELD_SP(insn, 0, 18);
3198 target = sign_extend(target, 19);
3199 target <<= 2;
3200 do_fbranch(dc, target, insn, cc);
3201 goto jmp_insn;
3203 #else
3204 case 0x7: /* CBN+x */
3206 goto ncp_insn;
3208 #endif
3209 case 0x2: /* BN+x */
3211 target = GET_FIELD(insn, 10, 31);
3212 target = sign_extend(target, 22);
3213 target <<= 2;
3214 do_branch(dc, target, insn, 0);
3215 goto jmp_insn;
3217 case 0x6: /* FBN+x */
3219 if (gen_trap_ifnofpu(dc)) {
3220 goto jmp_insn;
3222 target = GET_FIELD(insn, 10, 31);
3223 target = sign_extend(target, 22);
3224 target <<= 2;
3225 do_fbranch(dc, target, insn, 0);
3226 goto jmp_insn;
3228 case 0x4: /* SETHI */
3229 /* Special-case %g0 because that's the canonical nop. */
3230 if (rd) {
3231 uint32_t value = GET_FIELD(insn, 10, 31);
3232 TCGv t = gen_dest_gpr(dc, rd);
3233 tcg_gen_movi_tl(t, value << 10);
3234 gen_store_gpr(dc, rd, t);
3236 break;
3237 case 0x0: /* UNIMPL */
3238 default:
3239 goto illegal_insn;
3241 break;
3243 break;
3244 case 1: /* CALL */
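/* The 30-bit word displacement is PC-relative; the return address
   (the address of the call itself) is written to r15, i.e. %o7. */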
3246 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3247 TCGv o7 = gen_dest_gpr(dc, 15);
3249 tcg_gen_movi_tl(o7, dc->pc);
3250 gen_store_gpr(dc, 15, o7);
3251 target += dc->pc;
3252 gen_mov_pc_npc(dc);
3253 #ifdef TARGET_SPARC64
3254 if (unlikely(AM_CHECK(dc))) {
3255 target &= 0xffffffffULL;
3257 #endif
3258 dc->npc = target;
3260 goto jmp_insn;
3261 case 2: /* FPU & Logical Operations */
3263 unsigned int xop = GET_FIELD(insn, 7, 12);
3264 TCGv cpu_dst = get_temp_tl(dc);
3265 TCGv cpu_tmp0;
3267 if (xop == 0x3a) { /* generate trap */
3268 int cond = GET_FIELD(insn, 3, 6);
3269 TCGv_i32 trap;
3270 TCGLabel *l1 = NULL;
3271 int mask;
3273 if (cond == 0) {
3274 /* Trap never. */
3275 break;
3278 save_state(dc);
3280 if (cond != 8) {
3281 /* Conditional trap. */
3282 DisasCompare cmp;
3283 #ifdef TARGET_SPARC64
3284 /* V9 icc/xcc */
3285 int cc = GET_FIELD_SP(insn, 11, 12);
3286 if (cc == 0) {
3287 gen_compare(&cmp, 0, cond, dc);
3288 } else if (cc == 2) {
3289 gen_compare(&cmp, 1, cond, dc);
3290 } else {
3291 goto illegal_insn;
3293 #else
3294 gen_compare(&cmp, 0, cond, dc);
3295 #endif
3296 l1 = gen_new_label();
3297 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3298 cmp.c1, cmp.c2, l1);
3299 free_compare(&cmp);
3302 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3303 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3305 /* Don't use the normal temporaries, as they may well have
3306 gone out of scope with the branch above. While we're
3307 doing that we might as well pre-truncate to 32-bit. */
3308 trap = tcg_temp_new_i32();
3310 rs1 = GET_FIELD_SP(insn, 14, 18);
3311 if (IS_IMM) {
3312 rs2 = GET_FIELD_SP(insn, 0, 7);
3313 if (rs1 == 0) {
3314 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3315 /* Signal that the trap value is fully constant. */
3316 mask = 0;
3317 } else {
3318 TCGv t1 = gen_load_gpr(dc, rs1);
3319 tcg_gen_trunc_tl_i32(trap, t1);
3320 tcg_gen_addi_i32(trap, trap, rs2);
3322 } else {
3323 TCGv t1, t2;
3324 rs2 = GET_FIELD_SP(insn, 0, 4);
3325 t1 = gen_load_gpr(dc, rs1);
3326 t2 = gen_load_gpr(dc, rs2);
3327 tcg_gen_add_tl(t1, t1, t2);
3328 tcg_gen_trunc_tl_i32(trap, t1);
3330 if (mask != 0) {
3331 tcg_gen_andi_i32(trap, trap, mask);
3332 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3335 gen_helper_raise_exception(cpu_env, trap);
3336 tcg_temp_free_i32(trap);
3338 if (cond == 8) {
3339 /* An unconditional trap ends the TB. */
3340 dc->is_br = 1;
3341 goto jmp_insn;
3342 } else {
3343 /* A conditional trap falls through to the next insn. */
3344 gen_set_label(l1);
3345 break;
3347 } else if (xop == 0x28) {
3348 rs1 = GET_FIELD(insn, 13, 17);
3349 switch(rs1) {
3350 case 0: /* rdy */
3351 #ifndef TARGET_SPARC64
3352 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3353 manual, rdy on the microSPARC
3354 II */
3355 case 0x0f: /* stbar in the SPARCv8 manual,
3356 rdy on the microSPARC II */
3357 case 0x10 ... 0x1f: /* implementation-dependent in the
3358 SPARCv8 manual, rdy on the
3359 microSPARC II */
3360 /* Read Asr17 */
3361 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3362 TCGv t = gen_dest_gpr(dc, rd);
3363 /* Read Asr17 for a Leon3 monoprocessor */
3364 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3365 gen_store_gpr(dc, rd, t);
3366 break;
3368 #endif
3369 gen_store_gpr(dc, rd, cpu_y);
3370 break;
3371 #ifdef TARGET_SPARC64
3372 case 0x2: /* V9 rdccr */
3373 update_psr(dc);
3374 gen_helper_rdccr(cpu_dst, cpu_env);
3375 gen_store_gpr(dc, rd, cpu_dst);
3376 break;
3377 case 0x3: /* V9 rdasi */
3378 tcg_gen_movi_tl(cpu_dst, dc->asi);
3379 gen_store_gpr(dc, rd, cpu_dst);
3380 break;
3381 case 0x4: /* V9 rdtick */
3383 TCGv_ptr r_tickptr;
3384 TCGv_i32 r_const;
3386 r_tickptr = tcg_temp_new_ptr();
3387 r_const = tcg_const_i32(dc->mem_idx);
3388 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3389 offsetof(CPUSPARCState, tick));
3390 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3391 r_const);
3392 tcg_temp_free_ptr(r_tickptr);
3393 tcg_temp_free_i32(r_const);
3394 gen_store_gpr(dc, rd, cpu_dst);
3396 break;
3397 case 0x5: /* V9 rdpc */
3399 TCGv t = gen_dest_gpr(dc, rd);
3400 if (unlikely(AM_CHECK(dc))) {
3401 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3402 } else {
3403 tcg_gen_movi_tl(t, dc->pc);
3405 gen_store_gpr(dc, rd, t);
3407 break;
3408 case 0x6: /* V9 rdfprs */
3409 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3410 gen_store_gpr(dc, rd, cpu_dst);
3411 break;
3412 case 0xf: /* V9 membar */
3413 break; /* no effect */
3414 case 0x13: /* Graphics Status */
3415 if (gen_trap_ifnofpu(dc)) {
3416 goto jmp_insn;
3418 gen_store_gpr(dc, rd, cpu_gsr);
3419 break;
3420 case 0x16: /* Softint */
3421 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3422 offsetof(CPUSPARCState, softint));
3423 gen_store_gpr(dc, rd, cpu_dst);
3424 break;
3425 case 0x17: /* Tick compare */
3426 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3427 break;
3428 case 0x18: /* System tick */
3430 TCGv_ptr r_tickptr;
3431 TCGv_i32 r_const;
3433 r_tickptr = tcg_temp_new_ptr();
3434 r_const = tcg_const_i32(dc->mem_idx);
3435 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3436 offsetof(CPUSPARCState, stick));
3437 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3438 r_const);
3439 tcg_temp_free_ptr(r_tickptr);
3440 tcg_temp_free_i32(r_const);
3441 gen_store_gpr(dc, rd, cpu_dst);
3443 break;
3444 case 0x19: /* System tick compare */
3445 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3446 break;
3447 case 0x1a: /* UltraSPARC-T1 Strand status */
3448 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3449 * this ASR as impl. dep. */
3451 CHECK_IU_FEATURE(dc, HYPV);
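/* Only one strand is modeled, so the status register presumably
   reads back as the constant 1 (a single active strand). */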
3453 TCGv t = gen_dest_gpr(dc, rd);
3454 tcg_gen_movi_tl(t, 1UL);
3455 gen_store_gpr(dc, rd, t);
3457 break;
3458 case 0x10: /* Performance Control */
3459 case 0x11: /* Performance Instrumentation Counter */
3460 case 0x12: /* Dispatch Control */
3461 case 0x14: /* Softint set, WO */
3462 case 0x15: /* Softint clear, WO */
3463 #endif
3464 default:
3465 goto illegal_insn;
3467 #if !defined(CONFIG_USER_ONLY)
3468 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3469 #ifndef TARGET_SPARC64
3470 if (!supervisor(dc)) {
3471 goto priv_insn;
3473 update_psr(dc);
3474 gen_helper_rdpsr(cpu_dst, cpu_env);
3475 #else
3476 CHECK_IU_FEATURE(dc, HYPV);
3477 if (!hypervisor(dc))
3478 goto priv_insn;
3479 rs1 = GET_FIELD(insn, 13, 17);
3480 switch (rs1) {
3481 case 0: // hpstate
3482 tcg_gen_ld_i64(cpu_dst, cpu_env,
3483 offsetof(CPUSPARCState, hpstate));
3484 break;
3485 case 1: // htstate
3486 // gen_op_rdhtstate();
3487 break;
3488 case 3: // hintp
3489 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3490 break;
3491 case 5: // htba
3492 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3493 break;
3494 case 6: // hver
3495 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3496 break;
3497 case 31: // hstick_cmpr
3498 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3499 break;
3500 default:
3501 goto illegal_insn;
3503 #endif
3504 gen_store_gpr(dc, rd, cpu_dst);
3505 break;
3506 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3507 if (!supervisor(dc)) {
3508 goto priv_insn;
3510 cpu_tmp0 = get_temp_tl(dc);
3511 #ifdef TARGET_SPARC64
3512 rs1 = GET_FIELD(insn, 13, 17);
3513 switch (rs1) {
3514 case 0: // tpc
3516 TCGv_ptr r_tsptr;
3518 r_tsptr = tcg_temp_new_ptr();
3519 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3520 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3521 offsetof(trap_state, tpc));
3522 tcg_temp_free_ptr(r_tsptr);
3524 break;
3525 case 1: // tnpc
3527 TCGv_ptr r_tsptr;
3529 r_tsptr = tcg_temp_new_ptr();
3530 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3531 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3532 offsetof(trap_state, tnpc));
3533 tcg_temp_free_ptr(r_tsptr);
3535 break;
3536 case 2: // tstate
3538 TCGv_ptr r_tsptr;
3540 r_tsptr = tcg_temp_new_ptr();
3541 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3542 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3543 offsetof(trap_state, tstate));
3544 tcg_temp_free_ptr(r_tsptr);
3546 break;
3547 case 3: // tt
3549 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3551 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3552 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3553 offsetof(trap_state, tt));
3554 tcg_temp_free_ptr(r_tsptr);
3556 break;
3557 case 4: // tick
3559 TCGv_ptr r_tickptr;
3560 TCGv_i32 r_const;
3562 r_tickptr = tcg_temp_new_ptr();
3563 r_const = tcg_const_i32(dc->mem_idx);
3564 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3565 offsetof(CPUSPARCState, tick));
3566 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3567 r_tickptr, r_const);
3568 tcg_temp_free_ptr(r_tickptr);
3569 tcg_temp_free_i32(r_const);
3571 break;
3572 case 5: // tba
3573 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3574 break;
3575 case 6: // pstate
3576 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3577 offsetof(CPUSPARCState, pstate));
3578 break;
3579 case 7: // tl
3580 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3581 offsetof(CPUSPARCState, tl));
3582 break;
3583 case 8: // pil
3584 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3585 offsetof(CPUSPARCState, psrpil));
3586 break;
3587 case 9: // cwp
3588 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3589 break;
3590 case 10: // cansave
3591 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3592 offsetof(CPUSPARCState, cansave));
3593 break;
3594 case 11: // canrestore
3595 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3596 offsetof(CPUSPARCState, canrestore));
3597 break;
3598 case 12: // cleanwin
3599 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3600 offsetof(CPUSPARCState, cleanwin));
3601 break;
3602 case 13: // otherwin
3603 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3604 offsetof(CPUSPARCState, otherwin));
3605 break;
3606 case 14: // wstate
3607 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3608 offsetof(CPUSPARCState, wstate));
3609 break;
3610 case 16: // UA2005 gl
3611 CHECK_IU_FEATURE(dc, GL);
3612 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3613 offsetof(CPUSPARCState, gl));
3614 break;
3615 case 26: // UA2005 strand status
3616 CHECK_IU_FEATURE(dc, HYPV);
3617 if (!hypervisor(dc))
3618 goto priv_insn;
3619 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3620 break;
3621 case 31: // ver
3622 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3623 break;
3624 case 15: // fq
3625 default:
3626 goto illegal_insn;
3628 #else
3629 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3630 #endif
3631 gen_store_gpr(dc, rd, cpu_tmp0);
3632 break;
3633 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3634 #ifdef TARGET_SPARC64
3635 gen_helper_flushw(cpu_env);
3636 #else
3637 if (!supervisor(dc))
3638 goto priv_insn;
3639 gen_store_gpr(dc, rd, cpu_tbr);
3640 #endif
3641 break;
3642 #endif
3643 } else if (xop == 0x34) { /* FPU Operations */
3644 if (gen_trap_ifnofpu(dc)) {
3645 goto jmp_insn;
3647 gen_op_clear_ieee_excp_and_FTT();
3648 rs1 = GET_FIELD(insn, 13, 17);
3649 rs2 = GET_FIELD(insn, 27, 31);
3650 xop = GET_FIELD(insn, 18, 26);
3652 switch (xop) {
3653 case 0x1: /* fmovs */
3654 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3655 gen_store_fpr_F(dc, rd, cpu_src1_32);
3656 break;
3657 case 0x5: /* fnegs */
3658 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3659 break;
3660 case 0x9: /* fabss */
3661 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3662 break;
3663 case 0x29: /* fsqrts */
3664 CHECK_FPU_FEATURE(dc, FSQRT);
3665 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3666 break;
3667 case 0x2a: /* fsqrtd */
3668 CHECK_FPU_FEATURE(dc, FSQRT);
3669 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3670 break;
3671 case 0x2b: /* fsqrtq */
3672 CHECK_FPU_FEATURE(dc, FLOAT128);
3673 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3674 break;
3675 case 0x41: /* fadds */
3676 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3677 break;
3678 case 0x42: /* faddd */
3679 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3680 break;
3681 case 0x43: /* faddq */
3682 CHECK_FPU_FEATURE(dc, FLOAT128);
3683 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3684 break;
3685 case 0x45: /* fsubs */
3686 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3687 break;
3688 case 0x46: /* fsubd */
3689 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3690 break;
3691 case 0x47: /* fsubq */
3692 CHECK_FPU_FEATURE(dc, FLOAT128);
3693 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3694 break;
3695 case 0x49: /* fmuls */
3696 CHECK_FPU_FEATURE(dc, FMUL);
3697 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3698 break;
3699 case 0x4a: /* fmuld */
3700 CHECK_FPU_FEATURE(dc, FMUL);
3701 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3702 break;
3703 case 0x4b: /* fmulq */
3704 CHECK_FPU_FEATURE(dc, FLOAT128);
3705 CHECK_FPU_FEATURE(dc, FMUL);
3706 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3707 break;
3708 case 0x4d: /* fdivs */
3709 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3710 break;
3711 case 0x4e: /* fdivd */
3712 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3713 break;
3714 case 0x4f: /* fdivq */
3715 CHECK_FPU_FEATURE(dc, FLOAT128);
3716 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3717 break;
3718 case 0x69: /* fsmuld */
3719 CHECK_FPU_FEATURE(dc, FSMULD);
3720 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3721 break;
3722 case 0x6e: /* fdmulq */
3723 CHECK_FPU_FEATURE(dc, FLOAT128);
3724 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3725 break;
3726 case 0xc4: /* fitos */
3727 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3728 break;
3729 case 0xc6: /* fdtos */
3730 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3731 break;
3732 case 0xc7: /* fqtos */
3733 CHECK_FPU_FEATURE(dc, FLOAT128);
3734 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3735 break;
3736 case 0xc8: /* fitod */
3737 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3738 break;
3739 case 0xc9: /* fstod */
3740 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3741 break;
3742 case 0xcb: /* fqtod */
3743 CHECK_FPU_FEATURE(dc, FLOAT128);
3744 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3745 break;
3746 case 0xcc: /* fitoq */
3747 CHECK_FPU_FEATURE(dc, FLOAT128);
3748 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3749 break;
3750 case 0xcd: /* fstoq */
3751 CHECK_FPU_FEATURE(dc, FLOAT128);
3752 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3753 break;
3754 case 0xce: /* fdtoq */
3755 CHECK_FPU_FEATURE(dc, FLOAT128);
3756 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3757 break;
3758 case 0xd1: /* fstoi */
3759 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3760 break;
3761 case 0xd2: /* fdtoi */
3762 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3763 break;
3764 case 0xd3: /* fqtoi */
3765 CHECK_FPU_FEATURE(dc, FLOAT128);
3766 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3767 break;
3768 #ifdef TARGET_SPARC64
3769 case 0x2: /* V9 fmovd */
3770 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3771 gen_store_fpr_D(dc, rd, cpu_src1_64);
3772 break;
3773 case 0x3: /* V9 fmovq */
3774 CHECK_FPU_FEATURE(dc, FLOAT128);
3775 gen_move_Q(dc, rd, rs2);
3776 break;
3777 case 0x6: /* V9 fnegd */
3778 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3779 break;
3780 case 0x7: /* V9 fnegq */
3781 CHECK_FPU_FEATURE(dc, FLOAT128);
3782 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3783 break;
3784 case 0xa: /* V9 fabsd */
3785 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3786 break;
3787 case 0xb: /* V9 fabsq */
3788 CHECK_FPU_FEATURE(dc, FLOAT128);
3789 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3790 break;
3791 case 0x81: /* V9 fstox */
3792 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3793 break;
3794 case 0x82: /* V9 fdtox */
3795 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3796 break;
3797 case 0x83: /* V9 fqtox */
3798 CHECK_FPU_FEATURE(dc, FLOAT128);
3799 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3800 break;
3801 case 0x84: /* V9 fxtos */
3802 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3803 break;
3804 case 0x88: /* V9 fxtod */
3805 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3806 break;
3807 case 0x8c: /* V9 fxtoq */
3808 CHECK_FPU_FEATURE(dc, FLOAT128);
3809 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3810 break;
3811 #endif
3812 default:
3813 goto illegal_insn;
3815 } else if (xop == 0x35) { /* FPU Operations */
3816 #ifdef TARGET_SPARC64
3817 int cond;
3818 #endif
3819 if (gen_trap_ifnofpu(dc)) {
3820 goto jmp_insn;
3822 gen_op_clear_ieee_excp_and_FTT();
3823 rs1 = GET_FIELD(insn, 13, 17);
3824 rs2 = GET_FIELD(insn, 27, 31);
3825 xop = GET_FIELD(insn, 18, 26);
3827 #ifdef TARGET_SPARC64
3828 #define FMOVR(sz) \
3829 do { \
3830 DisasCompare cmp; \
3831 cond = GET_FIELD_SP(insn, 10, 12); \
3832 cpu_src1 = get_src1(dc, insn); \
3833 gen_compare_reg(&cmp, cond, cpu_src1); \
3834 gen_fmov##sz(dc, &cmp, rd, rs2); \
3835 free_compare(&cmp); \
3836 } while (0)
3838 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3839 FMOVR(s);
3840 break;
3841 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3842 FMOVR(d);
3843 break;
3844 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3845 CHECK_FPU_FEATURE(dc, FLOAT128);
3846 FMOVR(q);
3847 break;
3849 #undef FMOVR
3850 #endif
3851 switch (xop) {
3852 #ifdef TARGET_SPARC64
3853 #define FMOVCC(fcc, sz) \
3854 do { \
3855 DisasCompare cmp; \
3856 cond = GET_FIELD_SP(insn, 14, 17); \
3857 gen_fcompare(&cmp, fcc, cond); \
3858 gen_fmov##sz(dc, &cmp, rd, rs2); \
3859 free_compare(&cmp); \
3860 } while (0)
3862 case 0x001: /* V9 fmovscc %fcc0 */
3863 FMOVCC(0, s);
3864 break;
3865 case 0x002: /* V9 fmovdcc %fcc0 */
3866 FMOVCC(0, d);
3867 break;
3868 case 0x003: /* V9 fmovqcc %fcc0 */
3869 CHECK_FPU_FEATURE(dc, FLOAT128);
3870 FMOVCC(0, q);
3871 break;
3872 case 0x041: /* V9 fmovscc %fcc1 */
3873 FMOVCC(1, s);
3874 break;
3875 case 0x042: /* V9 fmovdcc %fcc1 */
3876 FMOVCC(1, d);
3877 break;
3878 case 0x043: /* V9 fmovqcc %fcc1 */
3879 CHECK_FPU_FEATURE(dc, FLOAT128);
3880 FMOVCC(1, q);
3881 break;
3882 case 0x081: /* V9 fmovscc %fcc2 */
3883 FMOVCC(2, s);
3884 break;
3885 case 0x082: /* V9 fmovdcc %fcc2 */
3886 FMOVCC(2, d);
3887 break;
3888 case 0x083: /* V9 fmovqcc %fcc2 */
3889 CHECK_FPU_FEATURE(dc, FLOAT128);
3890 FMOVCC(2, q);
3891 break;
3892 case 0x0c1: /* V9 fmovscc %fcc3 */
3893 FMOVCC(3, s);
3894 break;
3895 case 0x0c2: /* V9 fmovdcc %fcc3 */
3896 FMOVCC(3, d);
3897 break;
3898 case 0x0c3: /* V9 fmovqcc %fcc3 */
3899 CHECK_FPU_FEATURE(dc, FLOAT128);
3900 FMOVCC(3, q);
3901 break;
3902 #undef FMOVCC
3903 #define FMOVCC(xcc, sz) \
3904 do { \
3905 DisasCompare cmp; \
3906 cond = GET_FIELD_SP(insn, 14, 17); \
3907 gen_compare(&cmp, xcc, cond, dc); \
3908 gen_fmov##sz(dc, &cmp, rd, rs2); \
3909 free_compare(&cmp); \
3910 } while (0)
3912 case 0x101: /* V9 fmovscc %icc */
3913 FMOVCC(0, s);
3914 break;
3915 case 0x102: /* V9 fmovdcc %icc */
3916 FMOVCC(0, d);
3917 break;
3918 case 0x103: /* V9 fmovqcc %icc */
3919 CHECK_FPU_FEATURE(dc, FLOAT128);
3920 FMOVCC(0, q);
3921 break;
3922 case 0x181: /* V9 fmovscc %xcc */
3923 FMOVCC(1, s);
3924 break;
3925 case 0x182: /* V9 fmovdcc %xcc */
3926 FMOVCC(1, d);
3927 break;
3928 case 0x183: /* V9 fmovqcc %xcc */
3929 CHECK_FPU_FEATURE(dc, FLOAT128);
3930 FMOVCC(1, q);
3931 break;
3932 #undef FMOVCC
3933 #endif
3934 case 0x51: /* fcmps, V9 %fcc */
3935 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3936 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3937 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3938 break;
3939 case 0x52: /* fcmpd, V9 %fcc */
3940 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3941 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3942 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3943 break;
3944 case 0x53: /* fcmpq, V9 %fcc */
3945 CHECK_FPU_FEATURE(dc, FLOAT128);
3946 gen_op_load_fpr_QT0(QFPREG(rs1));
3947 gen_op_load_fpr_QT1(QFPREG(rs2));
3948 gen_op_fcmpq(rd & 3);
3949 break;
3950 case 0x55: /* fcmpes, V9 %fcc */
3951 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3952 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3953 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3954 break;
3955 case 0x56: /* fcmped, V9 %fcc */
3956 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3957 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3958 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3959 break;
3960 case 0x57: /* fcmpeq, V9 %fcc */
3961 CHECK_FPU_FEATURE(dc, FLOAT128);
3962 gen_op_load_fpr_QT0(QFPREG(rs1));
3963 gen_op_load_fpr_QT1(QFPREG(rs2));
3964 gen_op_fcmpeq(rd & 3);
3965 break;
3966 default:
3967 goto illegal_insn;
3969 } else if (xop == 0x2) {
3970 TCGv dst = gen_dest_gpr(dc, rd);
3971 rs1 = GET_FIELD(insn, 13, 17);
3972 if (rs1 == 0) {
3973 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3974 if (IS_IMM) { /* immediate */
3975 simm = GET_FIELDs(insn, 19, 31);
3976 tcg_gen_movi_tl(dst, simm);
3977 gen_store_gpr(dc, rd, dst);
3978 } else { /* register */
3979 rs2 = GET_FIELD(insn, 27, 31);
3980 if (rs2 == 0) {
3981 tcg_gen_movi_tl(dst, 0);
3982 gen_store_gpr(dc, rd, dst);
3983 } else {
3984 cpu_src2 = gen_load_gpr(dc, rs2);
3985 gen_store_gpr(dc, rd, cpu_src2);
3988 } else {
3989 cpu_src1 = get_src1(dc, insn);
3990 if (IS_IMM) { /* immediate */
3991 simm = GET_FIELDs(insn, 19, 31);
3992 tcg_gen_ori_tl(dst, cpu_src1, simm);
3993 gen_store_gpr(dc, rd, dst);
3994 } else { /* register */
3995 rs2 = GET_FIELD(insn, 27, 31);
3996 if (rs2 == 0) {
3997 /* mov shortcut: or x, %g0, y -> mov x, y */
3998 gen_store_gpr(dc, rd, cpu_src1);
3999 } else {
4000 cpu_src2 = gen_load_gpr(dc, rs2);
4001 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4002 gen_store_gpr(dc, rd, dst);
4006 #ifdef TARGET_SPARC64
4007 } else if (xop == 0x25) { /* sll, V9 sllx */
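/* Instruction bit 12 is the V9 x field: when set, the 64-bit shift
   (count mod 64) is used; when clear, the 32-bit form (count mod 32). */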
4008 cpu_src1 = get_src1(dc, insn);
4009 if (IS_IMM) { /* immediate */
4010 simm = GET_FIELDs(insn, 20, 31);
4011 if (insn & (1 << 12)) {
4012 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4013 } else {
4014 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4016 } else { /* register */
4017 rs2 = GET_FIELD(insn, 27, 31);
4018 cpu_src2 = gen_load_gpr(dc, rs2);
4019 cpu_tmp0 = get_temp_tl(dc);
4020 if (insn & (1 << 12)) {
4021 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4022 } else {
4023 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4025 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4027 gen_store_gpr(dc, rd, cpu_dst);
4028 } else if (xop == 0x26) { /* srl, V9 srlx */
4029 cpu_src1 = get_src1(dc, insn);
4030 if (IS_IMM) { /* immediate */
4031 simm = GET_FIELDs(insn, 20, 31);
4032 if (insn & (1 << 12)) {
4033 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4034 } else {
4035 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4036 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4038 } else { /* register */
4039 rs2 = GET_FIELD(insn, 27, 31);
4040 cpu_src2 = gen_load_gpr(dc, rs2);
4041 cpu_tmp0 = get_temp_tl(dc);
4042 if (insn & (1 << 12)) {
4043 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4044 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4045 } else {
4046 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4047 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4048 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4051 gen_store_gpr(dc, rd, cpu_dst);
4052 } else if (xop == 0x27) { /* sra, V9 srax */
4053 cpu_src1 = get_src1(dc, insn);
4054 if (IS_IMM) { /* immediate */
4055 simm = GET_FIELDs(insn, 20, 31);
4056 if (insn & (1 << 12)) {
4057 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4058 } else {
4059 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4060 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4062 } else { /* register */
4063 rs2 = GET_FIELD(insn, 27, 31);
4064 cpu_src2 = gen_load_gpr(dc, rs2);
4065 cpu_tmp0 = get_temp_tl(dc);
4066 if (insn & (1 << 12)) {
4067 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4068 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4069 } else {
4070 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4071 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4072 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4075 gen_store_gpr(dc, rd, cpu_dst);
4076 #endif
4077 } else if (xop < 0x36) {
4078 if (xop < 0x20) {
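/* xop values below 0x20 are the basic ALU ops; bit 4 (0x10) selects
   the cc-setting variant of each. */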
4079 cpu_src1 = get_src1(dc, insn);
4080 cpu_src2 = get_src2(dc, insn);
4081 switch (xop & ~0x10) {
4082 case 0x0: /* add */
4083 if (xop & 0x10) {
4084 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4085 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4086 dc->cc_op = CC_OP_ADD;
4087 } else {
4088 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4090 break;
4091 case 0x1: /* and */
4092 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4093 if (xop & 0x10) {
4094 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4095 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4096 dc->cc_op = CC_OP_LOGIC;
4098 break;
4099 case 0x2: /* or */
4100 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4101 if (xop & 0x10) {
4102 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4103 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4104 dc->cc_op = CC_OP_LOGIC;
4106 break;
4107 case 0x3: /* xor */
4108 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4109 if (xop & 0x10) {
4110 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4111 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4112 dc->cc_op = CC_OP_LOGIC;
4114 break;
4115 case 0x4: /* sub */
4116 if (xop & 0x10) {
4117 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4118 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4119 dc->cc_op = CC_OP_SUB;
4120 } else {
4121 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4123 break;
4124 case 0x5: /* andn */
4125 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4126 if (xop & 0x10) {
4127 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4128 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4129 dc->cc_op = CC_OP_LOGIC;
4131 break;
4132 case 0x6: /* orn */
4133 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4134 if (xop & 0x10) {
4135 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4136 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4137 dc->cc_op = CC_OP_LOGIC;
4139 break;
4140 case 0x7: /* xorn */
4141 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4142 if (xop & 0x10) {
4143 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4144 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4145 dc->cc_op = CC_OP_LOGIC;
4147 break;
4148 case 0x8: /* addx, V9 addc */
4149 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4150 (xop & 0x10));
4151 break;
4152 #ifdef TARGET_SPARC64
4153 case 0x9: /* V9 mulx */
4154 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4155 break;
4156 #endif
4157 case 0xa: /* umul */
4158 CHECK_IU_FEATURE(dc, MUL);
4159 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4160 if (xop & 0x10) {
4161 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4162 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4163 dc->cc_op = CC_OP_LOGIC;
4165 break;
4166 case 0xb: /* smul */
4167 CHECK_IU_FEATURE(dc, MUL);
4168 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4169 if (xop & 0x10) {
4170 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4171 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4172 dc->cc_op = CC_OP_LOGIC;
4174 break;
4175 case 0xc: /* subx, V9 subc */
4176 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4177 (xop & 0x10));
4178 break;
4179 #ifdef TARGET_SPARC64
4180 case 0xd: /* V9 udivx */
4181 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4182 break;
4183 #endif
4184 case 0xe: /* udiv */
4185 CHECK_IU_FEATURE(dc, DIV);
4186 if (xop & 0x10) {
4187 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4188 cpu_src2);
4189 dc->cc_op = CC_OP_DIV;
4190 } else {
4191 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4192 cpu_src2);
4194 break;
4195 case 0xf: /* sdiv */
4196 CHECK_IU_FEATURE(dc, DIV);
4197 if (xop & 0x10) {
4198 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4199 cpu_src2);
4200 dc->cc_op = CC_OP_DIV;
4201 } else {
4202 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4203 cpu_src2);
4205 break;
4206 default:
4207 goto illegal_insn;
4209 gen_store_gpr(dc, rd, cpu_dst);
4210 } else {
4211 cpu_src1 = get_src1(dc, insn);
4212 cpu_src2 = get_src2(dc, insn);
4213 switch (xop) {
4214 case 0x20: /* taddcc */
4215 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4216 gen_store_gpr(dc, rd, cpu_dst);
4217 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4218 dc->cc_op = CC_OP_TADD;
4219 break;
4220 case 0x21: /* tsubcc */
4221 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4222 gen_store_gpr(dc, rd, cpu_dst);
4223 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4224 dc->cc_op = CC_OP_TSUB;
4225 break;
4226 case 0x22: /* taddcctv */
4227 gen_helper_taddcctv(cpu_dst, cpu_env,
4228 cpu_src1, cpu_src2);
4229 gen_store_gpr(dc, rd, cpu_dst);
4230 dc->cc_op = CC_OP_TADDTV;
4231 break;
4232 case 0x23: /* tsubcctv */
4233 gen_helper_tsubcctv(cpu_dst, cpu_env,
4234 cpu_src1, cpu_src2);
4235 gen_store_gpr(dc, rd, cpu_dst);
4236 dc->cc_op = CC_OP_TSUBTV;
4237 break;
4238 case 0x24: /* mulscc */
4239 update_psr(dc);
4240 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4241 gen_store_gpr(dc, rd, cpu_dst);
4242 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4243 dc->cc_op = CC_OP_ADD;
4244 break;
4245 #ifndef TARGET_SPARC64
4246 case 0x25: /* sll */
4247 if (IS_IMM) { /* immediate */
4248 simm = GET_FIELDs(insn, 20, 31);
4249 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4250 } else { /* register */
4251 cpu_tmp0 = get_temp_tl(dc);
4252 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4253 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4255 gen_store_gpr(dc, rd, cpu_dst);
4256 break;
4257 case 0x26: /* srl */
4258 if (IS_IMM) { /* immediate */
4259 simm = GET_FIELDs(insn, 20, 31);
4260 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4261 } else { /* register */
4262 cpu_tmp0 = get_temp_tl(dc);
4263 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4264 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4266 gen_store_gpr(dc, rd, cpu_dst);
4267 break;
4268 case 0x27: /* sra */
4269 if (IS_IMM) { /* immediate */
4270 simm = GET_FIELDs(insn, 20, 31);
4271 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4272 } else { /* register */
4273 cpu_tmp0 = get_temp_tl(dc);
4274 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4275 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4277 gen_store_gpr(dc, rd, cpu_dst);
4278 break;
4279 #endif
4280 case 0x30:
4282 cpu_tmp0 = get_temp_tl(dc);
4283 switch(rd) {
4284 case 0: /* wry */
4285 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4286 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4287 break;
4288 #ifndef TARGET_SPARC64
4289 case 0x01 ... 0x0f: /* undefined in the
4290 SPARCv8 manual, nop
4291 on the microSPARC
4292 II */
4293 case 0x10 ... 0x1f: /* implementation-dependent
4294 in the SPARCv8
4295 manual, nop on the
4296 microSPARC II */
4297 if ((rd == 0x13) && (dc->def->features &
4298 CPU_FEATURE_POWERDOWN)) {
4299 /* LEON3 power-down */
4300 save_state(dc);
4301 gen_helper_power_down(cpu_env);
4303 break;
4304 #else
4305 case 0x2: /* V9 wrccr */
4306 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4307 gen_helper_wrccr(cpu_env, cpu_tmp0);
4308 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4309 dc->cc_op = CC_OP_FLAGS;
4310 break;
4311 case 0x3: /* V9 wrasi */
4312 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4313 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4314 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4315 offsetof(CPUSPARCState, asi));
4316 /* End TB to notice changed ASI. */
4317 save_state(dc);
4318 gen_op_next_insn();
4319 tcg_gen_exit_tb(0);
4320 dc->is_br = 1;
4321 break;
4322 case 0x6: /* V9 wrfprs */
4323 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4324 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4325 dc->fprs_dirty = 0;
4326 save_state(dc);
4327 gen_op_next_insn();
4328 tcg_gen_exit_tb(0);
4329 dc->is_br = 1;
4330 break;
4331 case 0xf: /* V9 sir, nop if user */
4332 #if !defined(CONFIG_USER_ONLY)
4333 if (supervisor(dc)) {
4334 ; // XXX
4336 #endif
4337 break;
4338 case 0x13: /* Graphics Status */
4339 if (gen_trap_ifnofpu(dc)) {
4340 goto jmp_insn;
4342 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4343 break;
4344 case 0x14: /* Softint set */
4345 if (!supervisor(dc))
4346 goto illegal_insn;
4347 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4348 gen_helper_set_softint(cpu_env, cpu_tmp0);
4349 break;
4350 case 0x15: /* Softint clear */
4351 if (!supervisor(dc))
4352 goto illegal_insn;
4353 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4354 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4355 break;
4356 case 0x16: /* Softint write */
4357 if (!supervisor(dc))
4358 goto illegal_insn;
4359 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4360 gen_helper_write_softint(cpu_env, cpu_tmp0);
4361 break;
4362 case 0x17: /* Tick compare */
4363 #if !defined(CONFIG_USER_ONLY)
4364 if (!supervisor(dc))
4365 goto illegal_insn;
4366 #endif
4368 TCGv_ptr r_tickptr;
4370 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4371 cpu_src2);
4372 r_tickptr = tcg_temp_new_ptr();
4373 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4374 offsetof(CPUSPARCState, tick));
4375 gen_helper_tick_set_limit(r_tickptr,
4376 cpu_tick_cmpr);
4377 tcg_temp_free_ptr(r_tickptr);
4379 break;
4380 case 0x18: /* System tick */
4381 #if !defined(CONFIG_USER_ONLY)
4382 if (!supervisor(dc))
4383 goto illegal_insn;
4384 #endif
4386 TCGv_ptr r_tickptr;
4388 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4389 cpu_src2);
4390 r_tickptr = tcg_temp_new_ptr();
4391 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4392 offsetof(CPUSPARCState, stick));
4393 gen_helper_tick_set_count(r_tickptr,
4394 cpu_tmp0);
4395 tcg_temp_free_ptr(r_tickptr);
4397 break;
4398 case 0x19: /* System tick compare */
4399 #if !defined(CONFIG_USER_ONLY)
4400 if (!supervisor(dc))
4401 goto illegal_insn;
4402 #endif
4404 TCGv_ptr r_tickptr;
4406 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4407 cpu_src2);
4408 r_tickptr = tcg_temp_new_ptr();
4409 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4410 offsetof(CPUSPARCState, stick));
4411 gen_helper_tick_set_limit(r_tickptr,
4412 cpu_stick_cmpr);
4413 tcg_temp_free_ptr(r_tickptr);
4415 break;
4417 case 0x10: /* Performance Control */
4418 case 0x11: /* Performance Instrumentation
4419 Counter */
4420 case 0x12: /* Dispatch Control */
4421 #endif
4422 default:
4423 goto illegal_insn;
4426 break;
4427 #if !defined(CONFIG_USER_ONLY)
4428 case 0x31: /* wrpsr, V9 saved, restored */
4430 if (!supervisor(dc))
4431 goto priv_insn;
4432 #ifdef TARGET_SPARC64
4433 switch (rd) {
4434 case 0:
4435 gen_helper_saved(cpu_env);
4436 break;
4437 case 1:
4438 gen_helper_restored(cpu_env);
4439 break;
4440 case 2: /* UA2005 allclean */
4441 case 3: /* UA2005 otherw */
4442 case 4: /* UA2005 normalw */
4443 case 5: /* UA2005 invalw */
4444 // XXX
4445 default:
4446 goto illegal_insn;
4448 #else
4449 cpu_tmp0 = get_temp_tl(dc);
4450 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4451 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4452 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4453 dc->cc_op = CC_OP_FLAGS;
4454 save_state(dc);
4455 gen_op_next_insn();
4456 tcg_gen_exit_tb(0);
4457 dc->is_br = 1;
4458 #endif
4460 break;
4461 case 0x32: /* wrwim, V9 wrpr */
4463 if (!supervisor(dc))
4464 goto priv_insn;
4465 cpu_tmp0 = get_temp_tl(dc);
4466 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4467 #ifdef TARGET_SPARC64
4468 switch (rd) {
4469 case 0: // tpc
4470 {
4471 TCGv_ptr r_tsptr;
4473 r_tsptr = tcg_temp_new_ptr();
4474 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4475 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4476 offsetof(trap_state, tpc));
4477 tcg_temp_free_ptr(r_tsptr);
4478 }
4479 break;
4480 case 1: // tnpc
4481 {
4482 TCGv_ptr r_tsptr;
4484 r_tsptr = tcg_temp_new_ptr();
4485 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4486 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4487 offsetof(trap_state, tnpc));
4488 tcg_temp_free_ptr(r_tsptr);
4489 }
4490 break;
4491 case 2: // tstate
4492 {
4493 TCGv_ptr r_tsptr;
4495 r_tsptr = tcg_temp_new_ptr();
4496 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4497 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4498 offsetof(trap_state,
4499 tstate));
4500 tcg_temp_free_ptr(r_tsptr);
4501 }
4502 break;
4503 case 3: // tt
4504 {
4505 TCGv_ptr r_tsptr;
4507 r_tsptr = tcg_temp_new_ptr();
4508 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4509 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4510 offsetof(trap_state, tt));
4511 tcg_temp_free_ptr(r_tsptr);
4512 }
4513 break;
4514 case 4: // tick
4515 {
4516 TCGv_ptr r_tickptr;
4518 r_tickptr = tcg_temp_new_ptr();
4519 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4520 offsetof(CPUSPARCState, tick));
4521 gen_helper_tick_set_count(r_tickptr,
4522 cpu_tmp0);
4523 tcg_temp_free_ptr(r_tickptr);
4524 }
4525 break;
4526 case 5: // tba
4527 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4528 break;
4529 case 6: // pstate
4530 save_state(dc);
4531 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4532 dc->npc = DYNAMIC_PC;
4533 break;
4534 case 7: // tl
4535 save_state(dc);
4536 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4537 offsetof(CPUSPARCState, tl));
4538 dc->npc = DYNAMIC_PC;
4539 break;
4540 case 8: // pil
4541 gen_helper_wrpil(cpu_env, cpu_tmp0);
4542 break;
4543 case 9: // cwp
4544 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4545 break;
4546 case 10: // cansave
4547 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4548 offsetof(CPUSPARCState,
4549 cansave));
4550 break;
4551 case 11: // canrestore
4552 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4553 offsetof(CPUSPARCState,
4554 canrestore));
4555 break;
4556 case 12: // cleanwin
4557 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4558 offsetof(CPUSPARCState,
4559 cleanwin));
4560 break;
4561 case 13: // otherwin
4562 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4563 offsetof(CPUSPARCState,
4564 otherwin));
4565 break;
4566 case 14: // wstate
4567 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4568 offsetof(CPUSPARCState,
4569 wstate));
4570 break;
4571 case 16: // UA2005 gl
4572 CHECK_IU_FEATURE(dc, GL);
4573 gen_helper_wrgl(cpu_env, cpu_tmp0);
4574 break;
4575 case 26: // UA2005 strand status
4576 CHECK_IU_FEATURE(dc, HYPV);
4577 if (!hypervisor(dc))
4578 goto priv_insn;
4579 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4580 break;
4581 default:
4582 goto illegal_insn;
4583 }
4584 #else
4585 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4586 if (dc->def->nwindows != 32) {
4587 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4588 (1 << dc->def->nwindows) - 1);
4589 }
4590 #endif
4591 }
4592 break;
4593 case 0x33: /* wrtbr, UA2005 wrhpr */
4594 {
4595 #ifndef TARGET_SPARC64
4596 if (!supervisor(dc))
4597 goto priv_insn;
4598 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4599 #else
4600 CHECK_IU_FEATURE(dc, HYPV);
4601 if (!hypervisor(dc))
4602 goto priv_insn;
4603 cpu_tmp0 = get_temp_tl(dc);
4604 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4605 switch (rd) {
4606 case 0: // hpstate
4607 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4608 offsetof(CPUSPARCState,
4609 hpstate));
4610 save_state(dc);
4611 gen_op_next_insn();
4612 tcg_gen_exit_tb(0);
4613 dc->is_br = 1;
4614 break;
4615 case 1: // htstate
4616 // XXX gen_op_wrhtstate();
4617 break;
4618 case 3: // hintp
4619 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4620 break;
4621 case 5: // htba
4622 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4623 break;
4624 case 31: // hstick_cmpr
4625 {
4626 TCGv_ptr r_tickptr;
4628 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4629 r_tickptr = tcg_temp_new_ptr();
4630 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4631 offsetof(CPUSPARCState, hstick));
4632 gen_helper_tick_set_limit(r_tickptr,
4633 cpu_hstick_cmpr);
4634 tcg_temp_free_ptr(r_tickptr);
4635 }
4636 break;
4637 case 6: // hver readonly
4638 default:
4639 goto illegal_insn;
4640 }
4641 #endif
4642 }
4643 break;
4644 #endif
4645 #ifdef TARGET_SPARC64
4646 case 0x2c: /* V9 movcc */
4647 {
4648 int cc = GET_FIELD_SP(insn, 11, 12);
4649 int cond = GET_FIELD_SP(insn, 14, 17);
4650 DisasCompare cmp;
4651 TCGv dst;
4653 if (insn & (1 << 18)) {
4654 if (cc == 0) {
4655 gen_compare(&cmp, 0, cond, dc);
4656 } else if (cc == 2) {
4657 gen_compare(&cmp, 1, cond, dc);
4658 } else {
4659 goto illegal_insn;
4660 }
4661 } else {
4662 gen_fcompare(&cmp, cc, cond);
4663 }
4665 /* The get_src2 above loaded the normal 13-bit
4666 immediate field, not the 11-bit field we have
4667 in movcc. But it did handle the reg case. */
4668 if (IS_IMM) {
4669 simm = GET_FIELD_SPs(insn, 0, 10);
4670 tcg_gen_movi_tl(cpu_src2, simm);
4671 }
4673 dst = gen_load_gpr(dc, rd);
4674 tcg_gen_movcond_tl(cmp.cond, dst,
4675 cmp.c1, cmp.c2,
4676 cpu_src2, dst);
4677 free_compare(&cmp);
4678 gen_store_gpr(dc, rd, dst);
4679 break;
4680 }
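/* Sketch of the fixup above: GET_FIELD_SPs(insn, 0, 10) re-extracts
   bits 10..0 (the 11-bit movcc immediate) and sign-extends from bit
   10, whereas get_src2 had sign-extended the overlapping 13-bit
   field from bit 12, which is wrong for negative simm11 values. */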
4681 case 0x2d: /* V9 sdivx */
4682 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4683 gen_store_gpr(dc, rd, cpu_dst);
4684 break;
4685 case 0x2e: /* V9 popc */
4686 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4687 gen_store_gpr(dc, rd, cpu_dst);
4688 break;
4689 case 0x2f: /* V9 movr */
4690 {
4691 int cond = GET_FIELD_SP(insn, 10, 12);
4692 DisasCompare cmp;
4693 TCGv dst;
4695 gen_compare_reg(&cmp, cond, cpu_src1);
4697 /* The get_src2 above loaded the normal 13-bit
4698 immediate field, not the 10-bit field we have
4699 in movr. But it did handle the reg case. */
4700 if (IS_IMM) {
4701 simm = GET_FIELD_SPs(insn, 0, 9);
4702 tcg_gen_movi_tl(cpu_src2, simm);
4703 }
4705 dst = gen_load_gpr(dc, rd);
4706 tcg_gen_movcond_tl(cmp.cond, dst,
4707 cmp.c1, cmp.c2,
4708 cpu_src2, dst);
4709 free_compare(&cmp);
4710 gen_store_gpr(dc, rd, dst);
4711 break;
4712 }
4713 #endif
4714 default:
4715 goto illegal_insn;
4716 }
4717 }
4718 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4719 #ifdef TARGET_SPARC64
4720 int opf = GET_FIELD_SP(insn, 5, 13);
4721 rs1 = GET_FIELD(insn, 13, 17);
4722 rs2 = GET_FIELD(insn, 27, 31);
4723 if (gen_trap_ifnofpu(dc)) {
4724 goto jmp_insn;
4725 }
4727 switch (opf) {
4728 case 0x000: /* VIS I edge8cc */
4729 CHECK_FPU_FEATURE(dc, VIS1);
4730 cpu_src1 = gen_load_gpr(dc, rs1);
4731 cpu_src2 = gen_load_gpr(dc, rs2);
4732 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4733 gen_store_gpr(dc, rd, cpu_dst);
4734 break;
4735 case 0x001: /* VIS II edge8n */
4736 CHECK_FPU_FEATURE(dc, VIS2);
4737 cpu_src1 = gen_load_gpr(dc, rs1);
4738 cpu_src2 = gen_load_gpr(dc, rs2);
4739 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4740 gen_store_gpr(dc, rd, cpu_dst);
4741 break;
4742 case 0x002: /* VIS I edge8lcc */
4743 CHECK_FPU_FEATURE(dc, VIS1);
4744 cpu_src1 = gen_load_gpr(dc, rs1);
4745 cpu_src2 = gen_load_gpr(dc, rs2);
4746 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4747 gen_store_gpr(dc, rd, cpu_dst);
4748 break;
4749 case 0x003: /* VIS II edge8ln */
4750 CHECK_FPU_FEATURE(dc, VIS2);
4751 cpu_src1 = gen_load_gpr(dc, rs1);
4752 cpu_src2 = gen_load_gpr(dc, rs2);
4753 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4754 gen_store_gpr(dc, rd, cpu_dst);
4755 break;
4756 case 0x004: /* VIS I edge16cc */
4757 CHECK_FPU_FEATURE(dc, VIS1);
4758 cpu_src1 = gen_load_gpr(dc, rs1);
4759 cpu_src2 = gen_load_gpr(dc, rs2);
4760 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4761 gen_store_gpr(dc, rd, cpu_dst);
4762 break;
4763 case 0x005: /* VIS II edge16n */
4764 CHECK_FPU_FEATURE(dc, VIS2);
4765 cpu_src1 = gen_load_gpr(dc, rs1);
4766 cpu_src2 = gen_load_gpr(dc, rs2);
4767 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4768 gen_store_gpr(dc, rd, cpu_dst);
4769 break;
4770 case 0x006: /* VIS I edge16lcc */
4771 CHECK_FPU_FEATURE(dc, VIS1);
4772 cpu_src1 = gen_load_gpr(dc, rs1);
4773 cpu_src2 = gen_load_gpr(dc, rs2);
4774 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4775 gen_store_gpr(dc, rd, cpu_dst);
4776 break;
4777 case 0x007: /* VIS II edge16ln */
4778 CHECK_FPU_FEATURE(dc, VIS2);
4779 cpu_src1 = gen_load_gpr(dc, rs1);
4780 cpu_src2 = gen_load_gpr(dc, rs2);
4781 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4782 gen_store_gpr(dc, rd, cpu_dst);
4783 break;
4784 case 0x008: /* VIS I edge32cc */
4785 CHECK_FPU_FEATURE(dc, VIS1);
4786 cpu_src1 = gen_load_gpr(dc, rs1);
4787 cpu_src2 = gen_load_gpr(dc, rs2);
4788 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4789 gen_store_gpr(dc, rd, cpu_dst);
4790 break;
4791 case 0x009: /* VIS II edge32n */
4792 CHECK_FPU_FEATURE(dc, VIS2);
4793 cpu_src1 = gen_load_gpr(dc, rs1);
4794 cpu_src2 = gen_load_gpr(dc, rs2);
4795 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4796 gen_store_gpr(dc, rd, cpu_dst);
4797 break;
4798 case 0x00a: /* VIS I edge32lcc */
4799 CHECK_FPU_FEATURE(dc, VIS1);
4800 cpu_src1 = gen_load_gpr(dc, rs1);
4801 cpu_src2 = gen_load_gpr(dc, rs2);
4802 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4803 gen_store_gpr(dc, rd, cpu_dst);
4804 break;
4805 case 0x00b: /* VIS II edge32ln */
4806 CHECK_FPU_FEATURE(dc, VIS2);
4807 cpu_src1 = gen_load_gpr(dc, rs1);
4808 cpu_src2 = gen_load_gpr(dc, rs2);
4809 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4810 gen_store_gpr(dc, rd, cpu_dst);
4811 break;
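/* All twelve edge cases funnel into gen_edge(dc, dst, s1, s2, w,
   cc, l): w is the 8/16/32-bit element size, cc is set for the
   VIS I forms that also update the condition codes, and l selects
   the little-endian "l"/"ln" variants. */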
4812 case 0x010: /* VIS I array8 */
4813 CHECK_FPU_FEATURE(dc, VIS1);
4814 cpu_src1 = gen_load_gpr(dc, rs1);
4815 cpu_src2 = gen_load_gpr(dc, rs2);
4816 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4817 gen_store_gpr(dc, rd, cpu_dst);
4818 break;
4819 case 0x012: /* VIS I array16 */
4820 CHECK_FPU_FEATURE(dc, VIS1);
4821 cpu_src1 = gen_load_gpr(dc, rs1);
4822 cpu_src2 = gen_load_gpr(dc, rs2);
4823 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4824 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4825 gen_store_gpr(dc, rd, cpu_dst);
4826 break;
4827 case 0x014: /* VIS I array32 */
4828 CHECK_FPU_FEATURE(dc, VIS1);
4829 cpu_src1 = gen_load_gpr(dc, rs1);
4830 cpu_src2 = gen_load_gpr(dc, rs2);
4831 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4832 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4833 gen_store_gpr(dc, rd, cpu_dst);
4834 break;
4835 case 0x018: /* VIS I alignaddr */
4836 CHECK_FPU_FEATURE(dc, VIS1);
4837 cpu_src1 = gen_load_gpr(dc, rs1);
4838 cpu_src2 = gen_load_gpr(dc, rs2);
4839 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4840 gen_store_gpr(dc, rd, cpu_dst);
4841 break;
4842 case 0x01a: /* VIS I alignaddrl */
4843 CHECK_FPU_FEATURE(dc, VIS1);
4844 cpu_src1 = gen_load_gpr(dc, rs1);
4845 cpu_src2 = gen_load_gpr(dc, rs2);
4846 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4847 gen_store_gpr(dc, rd, cpu_dst);
4848 break;
4849 case 0x019: /* VIS II bmask */
4850 CHECK_FPU_FEATURE(dc, VIS2);
4851 cpu_src1 = gen_load_gpr(dc, rs1);
4852 cpu_src2 = gen_load_gpr(dc, rs2);
4853 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4854 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4855 gen_store_gpr(dc, rd, cpu_dst);
4856 break;
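/* bmask: the sum doubles as the byte-shuffle mask, deposited into
   the upper half of GSR where bshuffle (case 0x04c below) will
   read it. */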
4857 case 0x020: /* VIS I fcmple16 */
4858 CHECK_FPU_FEATURE(dc, VIS1);
4859 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4860 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4861 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4862 gen_store_gpr(dc, rd, cpu_dst);
4863 break;
4864 case 0x022: /* VIS I fcmpne16 */
4865 CHECK_FPU_FEATURE(dc, VIS1);
4866 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4867 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4868 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4869 gen_store_gpr(dc, rd, cpu_dst);
4870 break;
4871 case 0x024: /* VIS I fcmple32 */
4872 CHECK_FPU_FEATURE(dc, VIS1);
4873 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4874 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4875 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4876 gen_store_gpr(dc, rd, cpu_dst);
4877 break;
4878 case 0x026: /* VIS I fcmpne32 */
4879 CHECK_FPU_FEATURE(dc, VIS1);
4880 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4881 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4882 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4883 gen_store_gpr(dc, rd, cpu_dst);
4884 break;
4885 case 0x028: /* VIS I fcmpgt16 */
4886 CHECK_FPU_FEATURE(dc, VIS1);
4887 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4888 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4889 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4890 gen_store_gpr(dc, rd, cpu_dst);
4891 break;
4892 case 0x02a: /* VIS I fcmpeq16 */
4893 CHECK_FPU_FEATURE(dc, VIS1);
4894 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4895 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4896 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4897 gen_store_gpr(dc, rd, cpu_dst);
4898 break;
4899 case 0x02c: /* VIS I fcmpgt32 */
4900 CHECK_FPU_FEATURE(dc, VIS1);
4901 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4902 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4903 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4904 gen_store_gpr(dc, rd, cpu_dst);
4905 break;
4906 case 0x02e: /* VIS I fcmpeq32 */
4907 CHECK_FPU_FEATURE(dc, VIS1);
4908 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4909 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4910 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4911 gen_store_gpr(dc, rd, cpu_dst);
4912 break;
4913 case 0x031: /* VIS I fmul8x16 */
4914 CHECK_FPU_FEATURE(dc, VIS1);
4915 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4916 break;
4917 case 0x033: /* VIS I fmul8x16au */
4918 CHECK_FPU_FEATURE(dc, VIS1);
4919 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4920 break;
4921 case 0x035: /* VIS I fmul8x16al */
4922 CHECK_FPU_FEATURE(dc, VIS1);
4923 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4924 break;
4925 case 0x036: /* VIS I fmul8sux16 */
4926 CHECK_FPU_FEATURE(dc, VIS1);
4927 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4928 break;
4929 case 0x037: /* VIS I fmul8ulx16 */
4930 CHECK_FPU_FEATURE(dc, VIS1);
4931 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4932 break;
4933 case 0x038: /* VIS I fmuld8sux16 */
4934 CHECK_FPU_FEATURE(dc, VIS1);
4935 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4936 break;
4937 case 0x039: /* VIS I fmuld8ulx16 */
4938 CHECK_FPU_FEATURE(dc, VIS1);
4939 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4940 break;
4941 case 0x03a: /* VIS I fpack32 */
4942 CHECK_FPU_FEATURE(dc, VIS1);
4943 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4944 break;
4945 case 0x03b: /* VIS I fpack16 */
4946 CHECK_FPU_FEATURE(dc, VIS1);
4947 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4948 cpu_dst_32 = gen_dest_fpr_F(dc);
4949 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4950 gen_store_fpr_F(dc, rd, cpu_dst_32);
4951 break;
4952 case 0x03d: /* VIS I fpackfix */
4953 CHECK_FPU_FEATURE(dc, VIS1);
4954 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4955 cpu_dst_32 = gen_dest_fpr_F(dc);
4956 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4957 gen_store_fpr_F(dc, rd, cpu_dst_32);
4958 break;
4959 case 0x03e: /* VIS I pdist */
4960 CHECK_FPU_FEATURE(dc, VIS1);
4961 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4962 break;
4963 case 0x048: /* VIS I faligndata */
4964 CHECK_FPU_FEATURE(dc, VIS1);
4965 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4966 break;
4967 case 0x04b: /* VIS I fpmerge */
4968 CHECK_FPU_FEATURE(dc, VIS1);
4969 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4970 break;
4971 case 0x04c: /* VIS II bshuffle */
4972 CHECK_FPU_FEATURE(dc, VIS2);
4973 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4974 break;
4975 case 0x04d: /* VIS I fexpand */
4976 CHECK_FPU_FEATURE(dc, VIS1);
4977 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4978 break;
4979 case 0x050: /* VIS I fpadd16 */
4980 CHECK_FPU_FEATURE(dc, VIS1);
4981 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4982 break;
4983 case 0x051: /* VIS I fpadd16s */
4984 CHECK_FPU_FEATURE(dc, VIS1);
4985 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4986 break;
4987 case 0x052: /* VIS I fpadd32 */
4988 CHECK_FPU_FEATURE(dc, VIS1);
4989 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4990 break;
4991 case 0x053: /* VIS I fpadd32s */
4992 CHECK_FPU_FEATURE(dc, VIS1);
4993 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4994 break;
4995 case 0x054: /* VIS I fpsub16 */
4996 CHECK_FPU_FEATURE(dc, VIS1);
4997 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4998 break;
4999 case 0x055: /* VIS I fpsub16s */
5000 CHECK_FPU_FEATURE(dc, VIS1);
5001 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5002 break;
5003 case 0x056: /* VIS I fpsub32 */
5004 CHECK_FPU_FEATURE(dc, VIS1);
5005 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5006 break;
5007 case 0x057: /* VIS I fpsub32s */
5008 CHECK_FPU_FEATURE(dc, VIS1);
5009 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5010 break;
5011 case 0x060: /* VIS I fzero */
5012 CHECK_FPU_FEATURE(dc, VIS1);
5013 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5014 tcg_gen_movi_i64(cpu_dst_64, 0);
5015 gen_store_fpr_D(dc, rd, cpu_dst_64);
5016 break;
5017 case 0x061: /* VIS I fzeros */
5018 CHECK_FPU_FEATURE(dc, VIS1);
5019 cpu_dst_32 = gen_dest_fpr_F(dc);
5020 tcg_gen_movi_i32(cpu_dst_32, 0);
5021 gen_store_fpr_F(dc, rd, cpu_dst_32);
5022 break;
5023 case 0x062: /* VIS I fnor */
5024 CHECK_FPU_FEATURE(dc, VIS1);
5025 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5026 break;
5027 case 0x063: /* VIS I fnors */
5028 CHECK_FPU_FEATURE(dc, VIS1);
5029 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5030 break;
5031 case 0x064: /* VIS I fandnot2 */
5032 CHECK_FPU_FEATURE(dc, VIS1);
5033 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5034 break;
5035 case 0x065: /* VIS I fandnot2s */
5036 CHECK_FPU_FEATURE(dc, VIS1);
5037 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5038 break;
5039 case 0x066: /* VIS I fnot2 */
5040 CHECK_FPU_FEATURE(dc, VIS1);
5041 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5042 break;
5043 case 0x067: /* VIS I fnot2s */
5044 CHECK_FPU_FEATURE(dc, VIS1);
5045 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5046 break;
5047 case 0x068: /* VIS I fandnot1 */
5048 CHECK_FPU_FEATURE(dc, VIS1);
5049 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5050 break;
5051 case 0x069: /* VIS I fandnot1s */
5052 CHECK_FPU_FEATURE(dc, VIS1);
5053 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5054 break;
5055 case 0x06a: /* VIS I fnot1 */
5056 CHECK_FPU_FEATURE(dc, VIS1);
5057 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5058 break;
5059 case 0x06b: /* VIS I fnot1s */
5060 CHECK_FPU_FEATURE(dc, VIS1);
5061 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5062 break;
5063 case 0x06c: /* VIS I fxor */
5064 CHECK_FPU_FEATURE(dc, VIS1);
5065 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5066 break;
5067 case 0x06d: /* VIS I fxors */
5068 CHECK_FPU_FEATURE(dc, VIS1);
5069 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5070 break;
5071 case 0x06e: /* VIS I fnand */
5072 CHECK_FPU_FEATURE(dc, VIS1);
5073 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5074 break;
5075 case 0x06f: /* VIS I fnands */
5076 CHECK_FPU_FEATURE(dc, VIS1);
5077 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5078 break;
5079 case 0x070: /* VIS I fand */
5080 CHECK_FPU_FEATURE(dc, VIS1);
5081 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5082 break;
5083 case 0x071: /* VIS I fands */
5084 CHECK_FPU_FEATURE(dc, VIS1);
5085 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5086 break;
5087 case 0x072: /* VIS I fxnor */
5088 CHECK_FPU_FEATURE(dc, VIS1);
5089 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5090 break;
5091 case 0x073: /* VIS I fxnors */
5092 CHECK_FPU_FEATURE(dc, VIS1);
5093 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5094 break;
5095 case 0x074: /* VIS I fsrc1 */
5096 CHECK_FPU_FEATURE(dc, VIS1);
5097 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5098 gen_store_fpr_D(dc, rd, cpu_src1_64);
5099 break;
5100 case 0x075: /* VIS I fsrc1s */
5101 CHECK_FPU_FEATURE(dc, VIS1);
5102 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5103 gen_store_fpr_F(dc, rd, cpu_src1_32);
5104 break;
5105 case 0x076: /* VIS I fornot2 */
5106 CHECK_FPU_FEATURE(dc, VIS1);
5107 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5108 break;
5109 case 0x077: /* VIS I fornot2s */
5110 CHECK_FPU_FEATURE(dc, VIS1);
5111 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5112 break;
5113 case 0x078: /* VIS I fsrc2 */
5114 CHECK_FPU_FEATURE(dc, VIS1);
5115 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5116 gen_store_fpr_D(dc, rd, cpu_src1_64);
5117 break;
5118 case 0x079: /* VIS I fsrc2s */
5119 CHECK_FPU_FEATURE(dc, VIS1);
5120 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5121 gen_store_fpr_F(dc, rd, cpu_src1_32);
5122 break;
5123 case 0x07a: /* VIS I fornot1 */
5124 CHECK_FPU_FEATURE(dc, VIS1);
5125 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5126 break;
5127 case 0x07b: /* VIS I fornot1s */
5128 CHECK_FPU_FEATURE(dc, VIS1);
5129 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5130 break;
5131 case 0x07c: /* VIS I for */
5132 CHECK_FPU_FEATURE(dc, VIS1);
5133 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5134 break;
5135 case 0x07d: /* VIS I fors */
5136 CHECK_FPU_FEATURE(dc, VIS1);
5137 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5138 break;
5139 case 0x07e: /* VIS I fone */
5140 CHECK_FPU_FEATURE(dc, VIS1);
5141 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5142 tcg_gen_movi_i64(cpu_dst_64, -1);
5143 gen_store_fpr_D(dc, rd, cpu_dst_64);
5144 break;
5145 case 0x07f: /* VIS I fones */
5146 CHECK_FPU_FEATURE(dc, VIS1);
5147 cpu_dst_32 = gen_dest_fpr_F(dc);
5148 tcg_gen_movi_i32(cpu_dst_32, -1);
5149 gen_store_fpr_F(dc, rd, cpu_dst_32);
5150 break;
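/* None of the fzero..fones group needs a helper: they are plain
   bitwise operations on the 64-bit register images (32-bit halves
   for the "s" forms), so the gen_ne_fop_* wrappers emit host TCG
   and/or/xor/nor/... ops directly. */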
5151 case 0x080: /* VIS I shutdown */
5152 case 0x081: /* VIS II siam */
5153 // XXX
5154 goto illegal_insn;
5155 default:
5156 goto illegal_insn;
5157 }
5158 #else
5159 goto ncp_insn;
5160 #endif
5161 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5162 #ifdef TARGET_SPARC64
5163 goto illegal_insn;
5164 #else
5165 goto ncp_insn;
5166 #endif
5167 #ifdef TARGET_SPARC64
5168 } else if (xop == 0x39) { /* V9 return */
5169 save_state(dc);
5170 cpu_src1 = get_src1(dc, insn);
5171 cpu_tmp0 = get_temp_tl(dc);
5172 if (IS_IMM) { /* immediate */
5173 simm = GET_FIELDs(insn, 19, 31);
5174 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5175 } else { /* register */
5176 rs2 = GET_FIELD(insn, 27, 31);
5177 if (rs2) {
5178 cpu_src2 = gen_load_gpr(dc, rs2);
5179 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5180 } else {
5181 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5182 }
5183 }
5184 gen_helper_restore(cpu_env);
5185 gen_mov_pc_npc(dc);
5186 gen_check_align(cpu_tmp0, 3);
5187 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5188 dc->npc = DYNAMIC_PC;
5189 goto jmp_insn;
5190 #endif
5191 } else {
5192 cpu_src1 = get_src1(dc, insn);
5193 cpu_tmp0 = get_temp_tl(dc);
5194 if (IS_IMM) { /* immediate */
5195 simm = GET_FIELDs(insn, 19, 31);
5196 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5197 } else { /* register */
5198 rs2 = GET_FIELD(insn, 27, 31);
5199 if (rs2) {
5200 cpu_src2 = gen_load_gpr(dc, rs2);
5201 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5202 } else {
5203 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5204 }
5205 }
5206 switch (xop) {
5207 case 0x38: /* jmpl */
5208 {
5209 TCGv t = gen_dest_gpr(dc, rd);
5210 tcg_gen_movi_tl(t, dc->pc);
5211 gen_store_gpr(dc, rd, t);
5213 gen_mov_pc_npc(dc);
5214 gen_check_align(cpu_tmp0, 3);
5215 gen_address_mask(dc, cpu_tmp0);
5216 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5217 dc->npc = DYNAMIC_PC;
5218 }
5219 goto jmp_insn;
5220 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5221 case 0x39: /* rett, V9 return */
5222 {
5223 if (!supervisor(dc))
5224 goto priv_insn;
5225 gen_mov_pc_npc(dc);
5226 gen_check_align(cpu_tmp0, 3);
5227 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5228 dc->npc = DYNAMIC_PC;
5229 gen_helper_rett(cpu_env);
5230 }
5231 goto jmp_insn;
5232 #endif
5233 case 0x3b: /* flush */
5234 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5235 goto unimp_flush;
5236 /* nop */
5237 break;
5238 case 0x3c: /* save */
5239 gen_helper_save(cpu_env);
5240 gen_store_gpr(dc, rd, cpu_tmp0);
5241 break;
5242 case 0x3d: /* restore */
5243 gen_helper_restore(cpu_env);
5244 gen_store_gpr(dc, rd, cpu_tmp0);
5245 break;
5246 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
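/* Per V9 semantics, done resumes after the trapped instruction and
   retry re-executes it, both restoring pc/npc from the trap state;
   neither target is knowable at translation time, hence pc and npc
   both go DYNAMIC_PC before calling the helper. */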
5247 case 0x3e: /* V9 done/retry */
5248 {
5249 switch (rd) {
5250 case 0:
5251 if (!supervisor(dc))
5252 goto priv_insn;
5253 dc->npc = DYNAMIC_PC;
5254 dc->pc = DYNAMIC_PC;
5255 gen_helper_done(cpu_env);
5256 goto jmp_insn;
5257 case 1:
5258 if (!supervisor(dc))
5259 goto priv_insn;
5260 dc->npc = DYNAMIC_PC;
5261 dc->pc = DYNAMIC_PC;
5262 gen_helper_retry(cpu_env);
5263 goto jmp_insn;
5264 default:
5265 goto illegal_insn;
5266 }
5267 }
5268 break;
5269 #endif
5270 default:
5271 goto illegal_insn;
5272 }
5273 }
5274 break;
5275 }
5276 break;
5277 case 3: /* load/store instructions */
5278 {
5279 unsigned int xop = GET_FIELD(insn, 7, 12);
5280 /* ??? gen_address_mask prevents us from using a source
5281 register directly. Always generate a temporary. */
5282 TCGv cpu_addr = get_temp_tl(dc);
5284 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5285 if (xop == 0x3c || xop == 0x3e) {
5286 /* V9 casa/casxa : no offset */
5287 } else if (IS_IMM) { /* immediate */
5288 simm = GET_FIELDs(insn, 19, 31);
5289 if (simm != 0) {
5290 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5291 }
5292 } else { /* register */
5293 rs2 = GET_FIELD(insn, 27, 31);
5294 if (rs2 != 0) {
5295 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5296 }
5297 }
5298 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5299 (xop > 0x17 && xop <= 0x1d ) ||
5300 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5301 TCGv cpu_val = gen_dest_gpr(dc, rd);
5303 switch (xop) {
5304 case 0x0: /* ld, V9 lduw, load unsigned word */
5305 gen_address_mask(dc, cpu_addr);
5306 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5307 break;
5308 case 0x1: /* ldub, load unsigned byte */
5309 gen_address_mask(dc, cpu_addr);
5310 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5311 break;
5312 case 0x2: /* lduh, load unsigned halfword */
5313 gen_address_mask(dc, cpu_addr);
5314 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5315 break;
5316 case 0x3: /* ldd, load double word */
5317 if (rd & 1)
5318 goto illegal_insn;
5319 else {
5320 TCGv_i64 t64;
5322 gen_address_mask(dc, cpu_addr);
5323 t64 = tcg_temp_new_i64();
5324 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5325 tcg_gen_trunc_i64_tl(cpu_val, t64);
5326 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5327 gen_store_gpr(dc, rd + 1, cpu_val);
5328 tcg_gen_shri_i64(t64, t64, 32);
5329 tcg_gen_trunc_i64_tl(cpu_val, t64);
5330 tcg_temp_free_i64(t64);
5331 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5332 }
5333 break;
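/* ldd above splits the big-endian 64-bit value across the even/odd
   register pair: the low word of t64 (memory at addr + 4) is stored
   to rd + 1 here, and the remaining high word reaches rd through
   the common gen_store_gpr() after this switch. */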
5334 case 0x9: /* ldsb, load signed byte */
5335 gen_address_mask(dc, cpu_addr);
5336 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5337 break;
5338 case 0xa: /* ldsh, load signed halfword */
5339 gen_address_mask(dc, cpu_addr);
5340 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5341 break;
5342 case 0xd: /* ldstub */
5343 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5344 break;
5345 case 0x0f:
5346 /* swap: atomically swap register with memory */
5347 CHECK_IU_FEATURE(dc, SWAP);
5348 cpu_src1 = gen_load_gpr(dc, rd);
5349 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5350 dc->mem_idx, MO_TEUL);
5351 break;
5352 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5353 case 0x10: /* lda, V9 lduwa, load word alternate */
5354 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5355 break;
5356 case 0x11: /* lduba, load unsigned byte alternate */
5357 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5358 break;
5359 case 0x12: /* lduha, load unsigned halfword alternate */
5360 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5361 break;
5362 case 0x13: /* ldda, load double word alternate */
5363 if (rd & 1) {
5364 goto illegal_insn;
5365 }
5366 gen_ldda_asi(dc, cpu_addr, insn, rd);
5367 goto skip_move;
5368 case 0x19: /* ldsba, load signed byte alternate */
5369 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5370 break;
5371 case 0x1a: /* ldsha, load signed halfword alternate */
5372 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5373 break;
5374 case 0x1d: /* ldstuba -- XXX: should be atomic */
5375 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5376 break;
5377 case 0x1f: /* swapa: atomically swap register with
5378 alternate memory */
5379 CHECK_IU_FEATURE(dc, SWAP);
5380 cpu_src1 = gen_load_gpr(dc, rd);
5381 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5382 break;
5384 #ifndef TARGET_SPARC64
5385 case 0x30: /* ldc */
5386 case 0x31: /* ldcsr */
5387 case 0x33: /* lddc */
5388 goto ncp_insn;
5389 #endif
5390 #endif
5391 #ifdef TARGET_SPARC64
5392 case 0x08: /* V9 ldsw */
5393 gen_address_mask(dc, cpu_addr);
5394 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5395 break;
5396 case 0x0b: /* V9 ldx */
5397 gen_address_mask(dc, cpu_addr);
5398 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5399 break;
5400 case 0x18: /* V9 ldswa */
5401 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5402 break;
5403 case 0x1b: /* V9 ldxa */
5404 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5405 break;
5406 case 0x2d: /* V9 prefetch, no effect */
5407 goto skip_move;
5408 case 0x30: /* V9 ldfa */
5409 if (gen_trap_ifnofpu(dc)) {
5410 goto jmp_insn;
5411 }
5412 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5413 gen_update_fprs_dirty(dc, rd);
5414 goto skip_move;
5415 case 0x33: /* V9 lddfa */
5416 if (gen_trap_ifnofpu(dc)) {
5417 goto jmp_insn;
5418 }
5419 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5420 gen_update_fprs_dirty(dc, DFPREG(rd));
5421 goto skip_move;
5422 case 0x3d: /* V9 prefetcha, no effect */
5423 goto skip_move;
5424 case 0x32: /* V9 ldqfa */
5425 CHECK_FPU_FEATURE(dc, FLOAT128);
5426 if (gen_trap_ifnofpu(dc)) {
5427 goto jmp_insn;
5428 }
5429 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5430 gen_update_fprs_dirty(dc, QFPREG(rd));
5431 goto skip_move;
5432 #endif
5433 default:
5434 goto illegal_insn;
5435 }
5436 gen_store_gpr(dc, rd, cpu_val);
5437 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5438 skip_move: ;
5439 #endif
5440 } else if (xop >= 0x20 && xop < 0x24) {
5441 if (gen_trap_ifnofpu(dc)) {
5442 goto jmp_insn;
5443 }
5444 switch (xop) {
5445 case 0x20: /* ldf, load fpreg */
5446 gen_address_mask(dc, cpu_addr);
5447 cpu_dst_32 = gen_dest_fpr_F(dc);
5448 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5449 dc->mem_idx, MO_TEUL);
5450 gen_store_fpr_F(dc, rd, cpu_dst_32);
5451 break;
5452 case 0x21: /* ldfsr, V9 ldxfsr */
5453 #ifdef TARGET_SPARC64
5454 gen_address_mask(dc, cpu_addr);
5455 if (rd == 1) {
5456 TCGv_i64 t64 = tcg_temp_new_i64();
5457 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5458 dc->mem_idx, MO_TEQ);
5459 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5460 tcg_temp_free_i64(t64);
5461 break;
5462 }
5463 #endif
5464 cpu_dst_32 = get_temp_i32(dc);
5465 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5466 dc->mem_idx, MO_TEUL);
5467 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5468 break;
5469 case 0x22: /* ldqf, load quad fpreg */
5470 CHECK_FPU_FEATURE(dc, FLOAT128);
5471 gen_address_mask(dc, cpu_addr);
5472 cpu_src1_64 = tcg_temp_new_i64();
5473 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5474 MO_TEQ | MO_ALIGN_4);
5475 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5476 cpu_src2_64 = tcg_temp_new_i64();
5477 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5478 MO_TEQ | MO_ALIGN_4);
5479 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5480 tcg_temp_free_i64(cpu_src1_64);
5481 tcg_temp_free_i64(cpu_src2_64);
5482 break;
5483 case 0x23: /* lddf, load double fpreg */
5484 gen_address_mask(dc, cpu_addr);
5485 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5486 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5487 MO_TEQ | MO_ALIGN_4);
5488 gen_store_fpr_D(dc, rd, cpu_dst_64);
5489 break;
5490 default:
5491 goto illegal_insn;
5492 }
5493 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5494 xop == 0xe || xop == 0x1e) {
5495 TCGv cpu_val = gen_load_gpr(dc, rd);
5497 switch (xop) {
5498 case 0x4: /* st, store word */
5499 gen_address_mask(dc, cpu_addr);
5500 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5501 break;
5502 case 0x5: /* stb, store byte */
5503 gen_address_mask(dc, cpu_addr);
5504 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5505 break;
5506 case 0x6: /* sth, store halfword */
5507 gen_address_mask(dc, cpu_addr);
5508 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5509 break;
5510 case 0x7: /* std, store double word */
5511 if (rd & 1)
5512 goto illegal_insn;
5513 else {
5514 TCGv_i64 t64;
5515 TCGv lo;
5517 gen_address_mask(dc, cpu_addr);
5518 lo = gen_load_gpr(dc, rd + 1);
5519 t64 = tcg_temp_new_i64();
5520 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5521 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5522 tcg_temp_free_i64(t64);
5523 }
5524 break;
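/* std is the mirror of ldd: rd supplies the high word and rd + 1
   the low word, concatenated into one i64 so memory sees a single
   big-endian 64-bit store. */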
5525 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5526 case 0x14: /* sta, V9 stwa, store word alternate */
5527 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5528 break;
5529 case 0x15: /* stba, store byte alternate */
5530 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5531 break;
5532 case 0x16: /* stha, store halfword alternate */
5533 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5534 break;
5535 case 0x17: /* stda, store double word alternate */
5536 if (rd & 1) {
5537 goto illegal_insn;
5538 }
5539 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5540 break;
5541 #endif
5542 #ifdef TARGET_SPARC64
5543 case 0x0e: /* V9 stx */
5544 gen_address_mask(dc, cpu_addr);
5545 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5546 break;
5547 case 0x1e: /* V9 stxa */
5548 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5549 break;
5550 #endif
5551 default:
5552 goto illegal_insn;
5553 }
5554 } else if (xop > 0x23 && xop < 0x28) {
5555 if (gen_trap_ifnofpu(dc)) {
5556 goto jmp_insn;
5557 }
5558 switch (xop) {
5559 case 0x24: /* stf, store fpreg */
5560 gen_address_mask(dc, cpu_addr);
5561 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5562 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5563 dc->mem_idx, MO_TEUL);
5564 break;
5565 case 0x25: /* stfsr, V9 stxfsr */
5566 {
5567 #ifdef TARGET_SPARC64
5568 gen_address_mask(dc, cpu_addr);
5569 if (rd == 1) {
5570 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5571 break;
5572 }
5573 #endif
5574 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5575 }
5576 break;
5577 case 0x26:
5578 #ifdef TARGET_SPARC64
5579 /* V9 stqf, store quad fpreg */
5580 CHECK_FPU_FEATURE(dc, FLOAT128);
5581 gen_address_mask(dc, cpu_addr);
5582 /* ??? While stqf only requires 4-byte alignment, it is
5583 legal for the cpu to signal the unaligned exception.
5584 The OS trap handler is then required to fix it up.
5585 For qemu, this avoids having to probe the second page
5586 before performing the first write. */
5587 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5588 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5589 dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5590 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5591 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5592 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5593 dc->mem_idx, MO_TEQ);
5594 break;
5595 #else /* !TARGET_SPARC64 */
5596 /* stdfq, store floating point queue */
5597 #if defined(CONFIG_USER_ONLY)
5598 goto illegal_insn;
5599 #else
5600 if (!supervisor(dc))
5601 goto priv_insn;
5602 if (gen_trap_ifnofpu(dc)) {
5603 goto jmp_insn;
5604 }
5605 goto nfq_insn;
5606 #endif
5607 #endif
5608 case 0x27: /* stdf, store double fpreg */
5609 gen_address_mask(dc, cpu_addr);
5610 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5611 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5612 MO_TEQ | MO_ALIGN_4);
5613 break;
5614 default:
5615 goto illegal_insn;
5616 }
5617 } else if (xop > 0x33 && xop < 0x3f) {
5618 switch (xop) {
5619 #ifdef TARGET_SPARC64
5620 case 0x34: /* V9 stfa */
5621 if (gen_trap_ifnofpu(dc)) {
5622 goto jmp_insn;
5623 }
5624 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5625 break;
5626 case 0x36: /* V9 stqfa */
5627 {
5628 CHECK_FPU_FEATURE(dc, FLOAT128);
5629 if (gen_trap_ifnofpu(dc)) {
5630 goto jmp_insn;
5631 }
5632 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5633 }
5634 break;
5635 case 0x37: /* V9 stdfa */
5636 if (gen_trap_ifnofpu(dc)) {
5637 goto jmp_insn;
5638 }
5639 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5640 break;
5641 case 0x3e: /* V9 casxa */
5642 rs2 = GET_FIELD(insn, 27, 31);
5643 cpu_src2 = gen_load_gpr(dc, rs2);
5644 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5645 break;
5646 #else
5647 case 0x34: /* stc */
5648 case 0x35: /* stcsr */
5649 case 0x36: /* stdcq */
5650 case 0x37: /* stdc */
5651 goto ncp_insn;
5652 #endif
5653 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5654 case 0x3c: /* V9 or LEON3 casa */
5655 #ifndef TARGET_SPARC64
5656 CHECK_IU_FEATURE(dc, CASA);
5657 #endif
5658 rs2 = GET_FIELD(insn, 27, 31);
5659 cpu_src2 = gen_load_gpr(dc, rs2);
5660 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5661 break;
5662 #endif
5663 default:
5664 goto illegal_insn;
5665 }
5666 } else {
5667 goto illegal_insn;
5668 }
5669 }
5670 break;
5671 }
5672 /* default case for non-jump instructions */
5673 if (dc->npc == DYNAMIC_PC) {
5674 dc->pc = DYNAMIC_PC;
5675 gen_op_next_insn();
5676 } else if (dc->npc == JUMP_PC) {
5677 /* we can do a static jump */
5678 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5679 dc->is_br = 1;
5680 } else {
5681 dc->pc = dc->npc;
5682 dc->npc = dc->npc + 4;
5683 }
5684 jmp_insn:
5685 goto egress;
5686 illegal_insn:
5687 gen_exception(dc, TT_ILL_INSN);
5688 goto egress;
5689 unimp_flush:
5690 gen_exception(dc, TT_UNIMP_FLUSH);
5691 goto egress;
5692 #if !defined(CONFIG_USER_ONLY)
5693 priv_insn:
5694 gen_exception(dc, TT_PRIV_INSN);
5695 goto egress;
5696 #endif
5697 nfpu_insn:
5698 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5699 goto egress;
5700 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5701 nfq_insn:
5702 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5703 goto egress;
5704 #endif
5705 #ifndef TARGET_SPARC64
5706 ncp_insn:
5707 gen_exception(dc, TT_NCP_INSN);
5708 goto egress;
5709 #endif
5710 egress:
5711 if (dc->n_t32 != 0) {
5712 int i;
5713 for (i = dc->n_t32 - 1; i >= 0; --i) {
5714 tcg_temp_free_i32(dc->t32[i]);
5715 }
5716 dc->n_t32 = 0;
5717 }
5718 if (dc->n_ttl != 0) {
5719 int i;
5720 for (i = dc->n_ttl - 1; i >= 0; --i) {
5721 tcg_temp_free(dc->ttl[i]);
5722 }
5723 dc->n_ttl = 0;
5724 }
5725 }
5727 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5728 {
5729 SPARCCPU *cpu = sparc_env_get_cpu(env);
5730 CPUState *cs = CPU(cpu);
5731 target_ulong pc_start, last_pc;
5732 DisasContext dc1, *dc = &dc1;
5733 int num_insns;
5734 int max_insns;
5735 unsigned int insn;
5737 memset(dc, 0, sizeof(DisasContext));
5738 dc->tb = tb;
5739 pc_start = tb->pc;
5740 dc->pc = pc_start;
5741 last_pc = dc->pc;
5742 dc->npc = (target_ulong) tb->cs_base;
5743 dc->cc_op = CC_OP_DYNAMIC;
5744 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5745 dc->def = env->def;
5746 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5747 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5748 dc->singlestep = (cs->singlestep_enabled || singlestep);
5749 #ifndef CONFIG_USER_ONLY
5750 dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
5751 #endif
5752 #ifdef TARGET_SPARC64
5753 dc->fprs_dirty = 0;
5754 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5755 #ifndef CONFIG_USER_ONLY
5756 dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
5757 #endif
5758 #endif
5760 num_insns = 0;
5761 max_insns = tb->cflags & CF_COUNT_MASK;
5762 if (max_insns == 0) {
5763 max_insns = CF_COUNT_MASK;
5764 }
5765 if (max_insns > TCG_MAX_INSNS) {
5766 max_insns = TCG_MAX_INSNS;
5767 }
5769 gen_tb_start(tb);
5770 do {
5771 if (dc->npc & JUMP_PC) {
5772 assert(dc->jump_pc[1] == dc->pc + 4);
5773 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5774 } else {
5775 tcg_gen_insn_start(dc->pc, dc->npc);
5776 }
5777 num_insns++;
5778 last_pc = dc->pc;
5780 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5781 if (dc->pc != pc_start) {
5782 save_state(dc);
5783 }
5784 gen_helper_debug(cpu_env);
5785 tcg_gen_exit_tb(0);
5786 dc->is_br = 1;
5787 goto exit_gen_loop;
5788 }
5790 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5791 gen_io_start();
5792 }
5794 insn = cpu_ldl_code(env, dc->pc);
5796 disas_sparc_insn(dc, insn);
5798 if (dc->is_br)
5799 break;
5800 /* if the next PC is different, we abort now */
5801 if (dc->pc != (last_pc + 4))
5802 break;
5803 /* if we reach a page boundary, we stop generation so that the
5804 PC of a TT_TFAULT exception is always in the right page */
5805 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5806 break;
5807 /* if single step mode, we generate only one instruction and
5808 generate an exception */
5809 if (dc->singlestep) {
5810 break;
5811 }
5812 } while (!tcg_op_buf_full() &&
5813 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5814 num_insns < max_insns);
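/* So a TB ends on: an explicit branch (is_br), a PC discontinuity,
   a page crossing (keeping fault PCs in the right page), single-step
   mode, a full TCG op buffer, a near-page-sized TB, or hitting the
   max_insns budget. */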
5816 exit_gen_loop:
5817 if (tb->cflags & CF_LAST_IO) {
5818 gen_io_end();
5819 }
5820 if (!dc->is_br) {
5821 if (dc->pc != DYNAMIC_PC &&
5822 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5823 /* static PC and NPC: we can use direct chaining */
5824 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5825 } else {
5826 if (dc->pc != DYNAMIC_PC) {
5827 tcg_gen_movi_tl(cpu_pc, dc->pc);
5828 }
5829 save_npc(dc);
5830 tcg_gen_exit_tb(0);
5831 }
5832 }
5833 gen_tb_end(tb, num_insns);
5835 tb->size = last_pc + 4 - pc_start;
5836 tb->icount = num_insns;
5838 #ifdef DEBUG_DISAS
5839 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5840 && qemu_log_in_addr_range(pc_start)) {
5841 qemu_log_lock();
5842 qemu_log("--------------\n");
5843 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5844 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5845 qemu_log("\n");
5846 qemu_log_unlock();
5847 }
5848 #endif
5849 }
5851 void gen_intermediate_code_init(CPUSPARCState *env)
5852 {
5853 static int inited;
5854 static const char gregnames[32][4] = {
5855 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5856 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5857 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5858 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5860 static const char fregnames[32][4] = {
5861 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5862 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5863 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5864 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5867 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5868 #ifdef TARGET_SPARC64
5869 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5870 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5871 #else
5872 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5873 #endif
5874 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5875 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5878 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5879 #ifdef TARGET_SPARC64
5880 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5881 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5882 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5883 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5884 "hstick_cmpr" },
5885 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5886 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5887 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5888 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5889 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5890 #endif
5891 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5892 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5893 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5894 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5895 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5896 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5897 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5898 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5899 #ifndef CONFIG_USER_ONLY
5900 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5901 #endif
5902 };
5904 unsigned int i;
5906 /* init various static tables */
5907 if (inited) {
5908 return;
5909 }
5910 inited = 1;
5912 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5913 tcg_ctx.tcg_env = cpu_env;
5915 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5916 offsetof(CPUSPARCState, regwptr),
5917 "regwptr");
5919 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5920 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5921 }
5923 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5924 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5925 }
5927 TCGV_UNUSED(cpu_regs[0]);
5928 for (i = 1; i < 8; ++i) {
5929 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5930 offsetof(CPUSPARCState, gregs[i]),
5931 gregnames[i]);
5932 }
5934 for (i = 8; i < 32; ++i) {
5935 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5936 (i - 8) * sizeof(target_ulong),
5937 gregnames[i]);
5938 }
5940 for (i = 0; i < TARGET_DPREGS; i++) {
5941 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5942 offsetof(CPUSPARCState, fpr[i]),
5943 fregnames[i]);
5944 }
5945 }
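/* restore_state_to_opc() below consumes the (pc, npc) pair that
   tcg_gen_insn_start() recorded per instruction.  Instructions are
   4-byte aligned, so the low bits of a stored npc are free to carry
   the JUMP_PC tag: "npc & ~3" recovers the taken target, and the
   fall-through target is always pc + 4. */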
5947 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5948 target_ulong *data)
5949 {
5950 target_ulong pc = data[0];
5951 target_ulong npc = data[1];
5953 env->pc = pc;
5954 if (npc == DYNAMIC_PC) {
5955 /* dynamic NPC: already stored */
5956 } else if (npc & JUMP_PC) {
5957 /* jump PC: use 'cond' and the jump targets of the translation */
5958 if (env->cond) {
5959 env->npc = npc & ~3;
5960 } else {
5961 env->npc = pc + 4;
5962 }
5963 } else {
5964 env->npc = npc;
5965 }
5966 }